diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 7822d8ef2b3..1e4d449a9a5 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,40 +1,46 @@ <!-- -GitHub is reserved for bug reports and feature requests. The best place -to ask a general question is at the Elastic Discourse forums at -https://discuss.elastic.co. If you are in fact posting a bug report or -a feature request, please include one and only one of the below blocks -in your new issue. Note that whether you're filing a bug report or a -feature request, ensure that your submission is for an -[OS that we support](https://www.elastic.co/support/matrix#show_os). -Bug reports on an OS that we do not support or feature requests -specific to an OS that we do not support will be closed. + +** Please read the guidelines below. ** + +Issues that do not follow these guidelines are likely to be closed. + +1. GitHub is reserved for bug reports and feature requests. The best place to + ask a general question is at the Elastic [forums](https://discuss.elastic.co). + GitHub is not the place for general questions. + +2. Is this bug report or feature request for a supported OS? If not, it + is likely to be closed. See https://www.elastic.co/support/matrix#show_os + +3. Please fill out EITHER the feature request block or the bug report block + below, and delete the other block. + --> -<!-- -If you are filing a bug report, please remove the below feature -request block and provide responses for all of the below items. ---> +<!-- Feature request --> + +**Describe the feature**: + +<!-- Bug report --> **Elasticsearch version**: **Plugins installed**: [] -**JVM version**: +**JVM version** (`java -version`): -**OS version**: +**OS version** (`uname -a` if on a Unix-like system): **Description of the problem including expected versus actual behavior**: **Steps to reproduce**: + +Please include a *minimal* but *complete* recreation of the problem, including +(e.g.) index creation, mappings, settings, query etc. The easier you make for +us to reproduce it, the more likely that somebody will take the time to look at it. + 1. 2. 3. **Provide logs (if relevant)**: -<!-- -If you are filing a feature request, please remove the above bug -report block and provide responses for all of the below items. ---> - -**Describe the feature**: diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 216100c07da..35004570224 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -350,7 +350,7 @@ These are the linux flavors the Vagrantfile currently supports: * debian-8 aka jessie, the current debian stable distribution * centos-6 * centos-7 -* fedora-24 +* fedora-25 * oel-6 aka Oracle Enterprise Linux 6 * oel-7 aka Oracle Enterprise Linux 7 * sles-12 @@ -426,23 +426,23 @@ sudo -E bats $BATS_TESTS/*.bats You can also use Gradle to prepare the test environment and then starts a single VM: ------------------------------------------------- -gradle vagrantFedora24#up +gradle vagrantFedora25#up ------------------------------------------------- -Or any of vagrantCentos6#up, vagrantDebian8#up, vagrantFedora24#up, vagrantOel6#up, -vagrantOel7#up, vagrantOpensuse13#up, vagrantSles12#up, vagrantUbuntu1404#up, -vagrantUbuntu1604#up. +Or any of vagrantCentos6#up, vagrantCentos7#up, vagrantDebian8#up, +vagrantFedora25#up, vagrantOel6#up, vagrantOel7#up, vagrantOpensuse13#up, +vagrantSles12#up, vagrantUbuntu1404#up, vagrantUbuntu1604#up. 
Once up, you can then connect to the VM using SSH from the elasticsearch directory: ------------------------------------------------- -vagrant ssh fedora-24 +vagrant ssh fedora-25 ------------------------------------------------- Or from another directory: ------------------------------------------------- -VAGRANT_CWD=/path/to/elasticsearch vagrant ssh fedora-24 +VAGRANT_CWD=/path/to/elasticsearch vagrant ssh fedora-25 ------------------------------------------------- Note: Starting vagrant VM outside of the elasticsearch folder requires to diff --git a/Vagrantfile b/Vagrantfile index a818d666655..00cc9bd638f 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -56,8 +56,8 @@ Vagrant.configure(2) do |config| config.vm.box = "elastic/oraclelinux-7-x86_64" rpm_common config end - config.vm.define "fedora-24" do |config| - config.vm.box = "elastic/fedora-24-x86_64" + config.vm.define "fedora-25" do |config| + config.vm.box = "elastic/fedora-25-x86_64" dnf_common config end config.vm.define "opensuse-13" do |config| diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java index 4d8f7cfeaac..591fa400d18 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java @@ -36,8 +36,6 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.gateway.GatewayAllocator; import java.lang.reflect.InvocationTargetException; -import java.net.InetAddress; -import java.net.UnknownHostException; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -49,7 +47,7 @@ public final class Allocators { public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator(); protected NoopGatewayAllocator() { - super(Settings.EMPTY, null, null); + super(Settings.EMPTY); } @Override diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 3acf865f7bc..64ebba7578c 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -459,27 +459,11 @@ class BuildPlugin implements Plugin<Project> { // TODO: why are we not passing maxmemory to junit4? jvmArg '-Xmx' + System.getProperty('tests.heap.size', '512m') jvmArg '-Xms' + System.getProperty('tests.heap.size', '512m') - if (JavaVersion.current().isJava7()) { - // some tests need a large permgen, but that only exists on java 7 - jvmArg '-XX:MaxPermSize=128m' - } - jvmArg '-XX:MaxDirectMemorySize=512m' jvmArg '-XX:+HeapDumpOnOutOfMemoryError' File heapdumpDir = new File(project.buildDir, 'heapdump') heapdumpDir.mkdirs() jvmArg '-XX:HeapDumpPath=' + heapdumpDir - /* - * We only want to append -XX:-OmitStackTraceInFastThrow if a flag for OmitStackTraceInFastThrow is not already included in - * tests.jvm.argline. 
- */ - final String testsJvmArgline = System.getProperty('tests.jvm.argline') - if (testsJvmArgline == null) { - argLine '-XX:-OmitStackTraceInFastThrow' - } else if (testsJvmArgline.indexOf("OmitStackTraceInFastThrow") < 0) { - argLine testsJvmArgline.trim() + ' ' + '-XX:-OmitStackTraceInFastThrow' - } else { - argLine testsJvmArgline.trim() - } + argLine System.getProperty('tests.jvm.argline') // we use './temp' since this is per JVM and tests are forbidden from writing to CWD systemProperty 'java.io.tmpdir', './temp' diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy new file mode 100644 index 00000000000..34c3046aa2b --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/AntFixture.groovy @@ -0,0 +1,291 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.test + +import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.AntTask +import org.elasticsearch.gradle.LoggedExec +import org.gradle.api.GradleException +import org.gradle.api.Task +import org.gradle.api.tasks.Exec +import org.gradle.api.tasks.Input + +/** + * A fixture for integration tests which runs in a separate process launched by Ant. + */ +public class AntFixture extends AntTask implements Fixture { + + /** The path to the executable that starts the fixture. */ + @Input + String executable + + private final List<Object> arguments = new ArrayList<>() + + @Input + public void args(Object... args) { + arguments.addAll(args) + } + + /** + * Environment variables for the fixture process. The value can be any object, which + * will have toString() called at execution time. + */ + private final Map<String, Object> environment = new HashMap<>() + + @Input + public void env(String key, Object value) { + environment.put(key, value) + } + + /** A flag to indicate whether the command should be executed from a shell. */ + @Input + boolean useShell = false + + /** + * A flag to indicate whether the fixture should be run in the foreground, or spawned. + * It is protected so subclasses can override (eg RunTask). + */ + protected boolean spawn = true + + /** + * A closure to call before the fixture is considered ready. The closure is passed the fixture object, + * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait + * condition is for http on the http port. 
+ */ + @Input + Closure waitCondition = { AntFixture fixture, AntBuilder ant -> + File tmpFile = new File(fixture.cwd, 'wait.success') + ant.get(src: "http://${fixture.addressAndPort}", + dest: tmpFile.toString(), + ignoreerrors: true, // do not fail on error, so logging information can be flushed + retries: 10) + return tmpFile.exists() + } + + private final Task stopTask + + public AntFixture() { + stopTask = createStopTask() + finalizedBy(stopTask) + } + + @Override + public Task getStopTask() { + return stopTask + } + + @Override + protected void runAnt(AntBuilder ant) { + project.delete(baseDir) // reset everything + cwd.mkdirs() + final String realExecutable + final List<Object> realArgs = new ArrayList<>() + final Map<String, Object> realEnv = environment + // We need to choose which executable we are using. In shell mode, or when we + // are spawning and thus using the wrapper script, the executable is the shell. + if (useShell || spawn) { + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + realExecutable = 'cmd' + realArgs.add('/C') + realArgs.add('"') // quote the entire command + } else { + realExecutable = 'sh' + } + } else { + realExecutable = executable + realArgs.addAll(arguments) + } + if (spawn) { + writeWrapperScript(executable) + realArgs.add(wrapperScript) + realArgs.addAll(arguments) + } + if (Os.isFamily(Os.FAMILY_WINDOWS) && (useShell || spawn)) { + realArgs.add('"') + } + commandString.eachLine { line -> logger.info(line) } + + ant.exec(executable: realExecutable, spawn: spawn, dir: cwd, taskname: name) { + realEnv.each { key, value -> env(key: key, value: value) } + realArgs.each { arg(value: it) } + } + + String failedProp = "failed${name}" + // first wait for resources, or the failure marker from the wrapper script + ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) { + or { + resourceexists { + file(file: failureMarker.toString()) + } + and { + resourceexists { + file(file: pidFile.toString()) + } + resourceexists { + file(file: portsFile.toString()) + } + } + } + } + + if (ant.project.getProperty(failedProp) || failureMarker.exists()) { + fail("Failed to start ${name}") + } + + // the process is started (has a pid) and is bound to a network interface + // so now wait undil the waitCondition has been met + // TODO: change this to a loop? + boolean success + try { + success = waitCondition(this, ant) == false + } catch (Exception e) { + String msg = "Wait condition caught exception for ${name}" + logger.error(msg, e) + fail(msg, e) + } + if (success == false) { + fail("Wait condition failed for ${name}") + } + } + + /** Returns a debug string used to log information about how the fixture was run. */ + protected String getCommandString() { + String commandString = "\n${name} configuration:\n" + commandString += "-----------------------------------------\n" + commandString += " cwd: ${cwd}\n" + commandString += " command: ${executable} ${arguments.join(' ')}\n" + commandString += ' environment:\n' + environment.each { k, v -> commandString += " ${k}: ${v}\n" } + if (spawn) { + commandString += "\n [${wrapperScript.name}]\n" + wrapperScript.eachLine('UTF-8', { line -> commandString += " ${line}\n"}) + } + return commandString + } + + /** + * Writes a script to run the real executable, so that stdout/stderr can be captured. 
+ * TODO: this could be removed if we do use our own ProcessBuilder and pump output from the process + */ + private void writeWrapperScript(String executable) { + wrapperScript.parentFile.mkdirs() + String argsPasser = '"$@"' + String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi" + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + argsPasser = '%*' + exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )" + } + wrapperScript.setText("\"${executable}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8') + } + + /** Fail the build with the given message, and logging relevant info*/ + private void fail(String msg, Exception... suppressed) { + if (logger.isInfoEnabled() == false) { + // We already log the command at info level. No need to do it twice. + commandString.eachLine { line -> logger.error(line) } + } + logger.error("${name} output:") + logger.error("-----------------------------------------") + logger.error(" failure marker exists: ${failureMarker.exists()}") + logger.error(" pid file exists: ${pidFile.exists()}") + logger.error(" ports file exists: ${portsFile.exists()}") + // also dump the log file for the startup script (which will include ES logging output to stdout) + if (runLog.exists()) { + logger.error("\n [log]") + runLog.eachLine { line -> logger.error(" ${line}") } + } + logger.error("-----------------------------------------") + GradleException toThrow = new GradleException(msg) + for (Exception e : suppressed) { + toThrow.addSuppressed(e) + } + throw toThrow + } + + /** Adds a task to kill an elasticsearch node with the given pidfile */ + private Task createStopTask() { + final AntFixture fixture = this + final Object pid = "${ -> fixture.pid }" + Exec stop = project.tasks.create(name: "${name}#stop", type: LoggedExec) + stop.onlyIf { fixture.pidFile.exists() } + stop.doFirst { + logger.info("Shutting down ${fixture.name} with pid ${pid}") + } + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + stop.executable = 'Taskkill' + stop.args('/PID', pid, '/F') + } else { + stop.executable = 'kill' + stop.args('-9', pid) + } + stop.doLast { + project.delete(fixture.pidFile) + } + return stop + } + + /** + * A path relative to the build dir that all configuration and runtime files + * will live in for this fixture + */ + protected File getBaseDir() { + return new File(project.buildDir, "fixtures/${name}") + } + + /** Returns the working directory for the process. Defaults to "cwd" inside baseDir. */ + protected File getCwd() { + return new File(baseDir, 'cwd') + } + + /** Returns the file the process writes its pid to. Defaults to "pid" inside baseDir. */ + protected File getPidFile() { + return new File(baseDir, 'pid') + } + + /** Reads the pid file and returns the process' pid */ + public int getPid() { + return Integer.parseInt(pidFile.getText('UTF-8').trim()) + } + + /** Returns the file the process writes its bound ports to. Defaults to "ports" inside baseDir. */ + protected File getPortsFile() { + return new File(baseDir, 'ports') + } + + /** Returns an address and port suitable for a uri to connect to this node over http */ + public String getAddressAndPort() { + return portsFile.readLines("UTF-8").get(0) + } + + /** Returns a file that wraps around the actual command when {@code spawn == true}. */ + protected File getWrapperScript() { + return new File(cwd, Os.isFamily(Os.FAMILY_WINDOWS) ? 'run.bat' : 'run') + } + + /** Returns a file that the wrapper script writes when the command failed. 
*/ + protected File getFailureMarker() { + return new File(cwd, 'run.failed') + } + + /** Returns a file that the wrapper script writes when the command failed. */ + protected File getRunLog() { + return new File(cwd, 'run.log') + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 8a4dc72d15a..4e9d8a63f0f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -208,7 +208,7 @@ class ClusterFormationTasks { start.finalizedBy(stop) for (Object dependency : config.dependencies) { if (dependency instanceof Fixture) { - Task depStop = ((Fixture)dependency).stopTask + def depStop = ((Fixture)dependency).stopTask runner.finalizedBy(depStop) start.finalizedBy(depStop) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/Fixture.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/Fixture.groovy index 46b81624ba3..498a1627b35 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/Fixture.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/Fixture.groovy @@ -16,272 +16,15 @@ * specific language governing permissions and limitations * under the License. */ - package org.elasticsearch.gradle.test -import org.apache.tools.ant.taskdefs.condition.Os -import org.elasticsearch.gradle.AntTask -import org.elasticsearch.gradle.LoggedExec -import org.gradle.api.GradleException -import org.gradle.api.Task -import org.gradle.api.tasks.Exec -import org.gradle.api.tasks.Input - /** - * A fixture for integration tests which runs in a separate process. + * Any object that can produce an accompanying stop task, meant to tear down + * a previously instantiated service. */ -public class Fixture extends AntTask { - - /** The path to the executable that starts the fixture. */ - @Input - String executable - - private final List<Object> arguments = new ArrayList<>() - - @Input - public void args(Object... args) { - arguments.addAll(args) - } - - /** - * Environment variables for the fixture process. The value can be any object, which - * will have toString() called at execution time. - */ - private final Map<String, Object> environment = new HashMap<>() - - @Input - public void env(String key, Object value) { - environment.put(key, value) - } - - /** A flag to indicate whether the command should be executed from a shell. */ - @Input - boolean useShell = false - - /** - * A flag to indicate whether the fixture should be run in the foreground, or spawned. - * It is protected so subclasses can override (eg RunTask). - */ - protected boolean spawn = true - - /** - * A closure to call before the fixture is considered ready. The closure is passed the fixture object, - * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait - * condition is for http on the http port. - */ - @Input - Closure waitCondition = { Fixture fixture, AntBuilder ant -> - File tmpFile = new File(fixture.cwd, 'wait.success') - ant.get(src: "http://${fixture.addressAndPort}", - dest: tmpFile.toString(), - ignoreerrors: true, // do not fail on error, so logging information can be flushed - retries: 10) - return tmpFile.exists() - } +public interface Fixture { /** A task which will stop this fixture. This should be used as a finalizedBy for any tasks that use the fixture. 
*/ - public final Task stopTask + public Object getStopTask() - public Fixture() { - stopTask = createStopTask() - finalizedBy(stopTask) - } - - @Override - protected void runAnt(AntBuilder ant) { - project.delete(baseDir) // reset everything - cwd.mkdirs() - final String realExecutable - final List<Object> realArgs = new ArrayList<>() - final Map<String, Object> realEnv = environment - // We need to choose which executable we are using. In shell mode, or when we - // are spawning and thus using the wrapper script, the executable is the shell. - if (useShell || spawn) { - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - realExecutable = 'cmd' - realArgs.add('/C') - realArgs.add('"') // quote the entire command - } else { - realExecutable = 'sh' - } - } else { - realExecutable = executable - realArgs.addAll(arguments) - } - if (spawn) { - writeWrapperScript(executable) - realArgs.add(wrapperScript) - realArgs.addAll(arguments) - } - if (Os.isFamily(Os.FAMILY_WINDOWS) && (useShell || spawn)) { - realArgs.add('"') - } - commandString.eachLine { line -> logger.info(line) } - - ant.exec(executable: realExecutable, spawn: spawn, dir: cwd, taskname: name) { - realEnv.each { key, value -> env(key: key, value: value) } - realArgs.each { arg(value: it) } - } - - String failedProp = "failed${name}" - // first wait for resources, or the failure marker from the wrapper script - ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) { - or { - resourceexists { - file(file: failureMarker.toString()) - } - and { - resourceexists { - file(file: pidFile.toString()) - } - resourceexists { - file(file: portsFile.toString()) - } - } - } - } - - if (ant.project.getProperty(failedProp) || failureMarker.exists()) { - fail("Failed to start ${name}") - } - - // the process is started (has a pid) and is bound to a network interface - // so now wait undil the waitCondition has been met - // TODO: change this to a loop? - boolean success - try { - success = waitCondition(this, ant) == false - } catch (Exception e) { - String msg = "Wait condition caught exception for ${name}" - logger.error(msg, e) - fail(msg, e) - } - if (success == false) { - fail("Wait condition failed for ${name}") - } - } - - /** Returns a debug string used to log information about how the fixture was run. */ - protected String getCommandString() { - String commandString = "\n${name} configuration:\n" - commandString += "-----------------------------------------\n" - commandString += " cwd: ${cwd}\n" - commandString += " command: ${executable} ${arguments.join(' ')}\n" - commandString += ' environment:\n' - environment.each { k, v -> commandString += " ${k}: ${v}\n" } - if (spawn) { - commandString += "\n [${wrapperScript.name}]\n" - wrapperScript.eachLine('UTF-8', { line -> commandString += " ${line}\n"}) - } - return commandString - } - - /** - * Writes a script to run the real executable, so that stdout/stderr can be captured. - * TODO: this could be removed if we do use our own ProcessBuilder and pump output from the process - */ - private void writeWrapperScript(String executable) { - wrapperScript.parentFile.mkdirs() - String argsPasser = '"$@"' - String exitMarker = "; if [ \$? 
!= 0 ]; then touch run.failed; fi" - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - argsPasser = '%*' - exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )" - } - wrapperScript.setText("\"${executable}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8') - } - - /** Fail the build with the given message, and logging relevant info*/ - private void fail(String msg, Exception... suppressed) { - if (logger.isInfoEnabled() == false) { - // We already log the command at info level. No need to do it twice. - commandString.eachLine { line -> logger.error(line) } - } - logger.error("${name} output:") - logger.error("-----------------------------------------") - logger.error(" failure marker exists: ${failureMarker.exists()}") - logger.error(" pid file exists: ${pidFile.exists()}") - logger.error(" ports file exists: ${portsFile.exists()}") - // also dump the log file for the startup script (which will include ES logging output to stdout) - if (runLog.exists()) { - logger.error("\n [log]") - runLog.eachLine { line -> logger.error(" ${line}") } - } - logger.error("-----------------------------------------") - GradleException toThrow = new GradleException(msg) - for (Exception e : suppressed) { - toThrow.addSuppressed(e) - } - throw toThrow - } - - /** Adds a task to kill an elasticsearch node with the given pidfile */ - private Task createStopTask() { - final Fixture fixture = this - final Object pid = "${ -> fixture.pid }" - Exec stop = project.tasks.create(name: "${name}#stop", type: LoggedExec) - stop.onlyIf { fixture.pidFile.exists() } - stop.doFirst { - logger.info("Shutting down ${fixture.name} with pid ${pid}") - } - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - stop.executable = 'Taskkill' - stop.args('/PID', pid, '/F') - } else { - stop.executable = 'kill' - stop.args('-9', pid) - } - stop.doLast { - project.delete(fixture.pidFile) - } - return stop - } - - /** - * A path relative to the build dir that all configuration and runtime files - * will live in for this fixture - */ - protected File getBaseDir() { - return new File(project.buildDir, "fixtures/${name}") - } - - /** Returns the working directory for the process. Defaults to "cwd" inside baseDir. */ - protected File getCwd() { - return new File(baseDir, 'cwd') - } - - /** Returns the file the process writes its pid to. Defaults to "pid" inside baseDir. */ - protected File getPidFile() { - return new File(baseDir, 'pid') - } - - /** Reads the pid file and returns the process' pid */ - public int getPid() { - return Integer.parseInt(pidFile.getText('UTF-8').trim()) - } - - /** Returns the file the process writes its bound ports to. Defaults to "ports" inside baseDir. */ - protected File getPortsFile() { - return new File(baseDir, 'ports') - } - - /** Returns an address and port suitable for a uri to connect to this node over http */ - public String getAddressAndPort() { - return portsFile.readLines("UTF-8").get(0) - } - - /** Returns a file that wraps around the actual command when {@code spawn == true}. */ - protected File getWrapperScript() { - return new File(cwd, Os.isFamily(Os.FAMILY_WINDOWS) ? 'run.bat' : 'run') - } - - /** Returns a file that the wrapper script writes when the command failed. */ - protected File getFailureMarker() { - return new File(cwd, 'run.failed') - } - - /** Returns a file that the wrapper script writes when the command failed. 
*/ - protected File getRunLog() { - return new File(cwd, 'run.log') - } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 98ee91e37a8..0b19822a7f1 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -129,7 +129,7 @@ public class RestIntegTestTask extends DefaultTask { runner.dependsOn(dependencies) for (Object dependency : dependencies) { if (dependency instanceof Fixture) { - runner.finalizedBy(((Fixture)dependency).stopTask) + runner.finalizedBy(((Fixture)dependency).getStopTask()) } } return this @@ -140,7 +140,7 @@ public class RestIntegTestTask extends DefaultTask { runner.setDependsOn(dependencies) for (Object dependency : dependencies) { if (dependency instanceof Fixture) { - runner.finalizedBy(((Fixture)dependency).stopTask) + runner.finalizedBy(((Fixture)dependency).getStopTask()) } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/VagrantFixture.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/VagrantFixture.groovy new file mode 100644 index 00000000000..fa08a8f9c66 --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/VagrantFixture.groovy @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.test + +import org.elasticsearch.gradle.vagrant.VagrantCommandTask +import org.gradle.api.Task + +/** + * A fixture for integration tests which runs in a virtual machine launched by Vagrant. 
+ */ +class VagrantFixture extends VagrantCommandTask implements Fixture { + + private VagrantCommandTask stopTask + + public VagrantFixture() { + this.stopTask = project.tasks.create(name: "${name}#stop", type: VagrantCommandTask) { + command 'halt' + } + finalizedBy this.stopTask + } + + @Override + void setBoxName(String boxName) { + super.setBoxName(boxName) + this.stopTask.setBoxName(boxName) + } + + @Override + void setEnvironmentVars(Map<String, String> environmentVars) { + super.setEnvironmentVars(environmentVars) + this.stopTask.setEnvironmentVars(environmentVars) + } + + @Override + public Task getStopTask() { + return this.stopTask + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy index 65b90c4d9a0..110f2fc7e84 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy @@ -27,12 +27,15 @@ import org.gradle.api.tasks.Input public class BatsOverVagrantTask extends VagrantCommandTask { @Input - String command + String remoteCommand BatsOverVagrantTask() { - project.afterEvaluate { - args 'ssh', boxName, '--command', command - } + command = 'ssh' + } + + void setRemoteCommand(String remoteCommand) { + this.remoteCommand = Objects.requireNonNull(remoteCommand) + setArgs(['--command', remoteCommand]) } @Override diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy index abc6af9e09d..aab120e8d04 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy @@ -21,9 +21,15 @@ package org.elasticsearch.gradle.vagrant import org.apache.commons.io.output.TeeOutputStream import org.elasticsearch.gradle.LoggedExec import org.gradle.api.tasks.Input +import org.gradle.api.tasks.Optional +import org.gradle.api.tasks.TaskAction import org.gradle.internal.logging.progress.ProgressLoggerFactory import javax.inject.Inject +import java.util.concurrent.CountDownLatch +import java.util.concurrent.locks.Lock +import java.util.concurrent.locks.ReadWriteLock +import java.util.concurrent.locks.ReentrantLock /** * Runs a vagrant command. Pretty much like Exec task but with a nicer output @@ -31,6 +37,12 @@ import javax.inject.Inject */ public class VagrantCommandTask extends LoggedExec { + @Input + String command + + @Input @Optional + String subcommand + @Input String boxName @@ -40,11 +52,27 @@ public class VagrantCommandTask extends LoggedExec { public VagrantCommandTask() { executable = 'vagrant' + // We're using afterEvaluate here to slot in some logic that captures configurations and + // modifies the command line right before the main execution happens. The reason that we + // call doFirst instead of just doing the work in the afterEvaluate is that the latter + // restricts how subclasses can extend functionality. Calling afterEvaluate is like having + // all the logic of a task happening at construction time, instead of at execution time + // where a subclass can override or extend the logic. 
project.afterEvaluate { - // It'd be nice if --machine-readable were, well, nice - standardOutput = new TeeOutputStream(standardOutput, createLoggerOutputStream()) - if (environmentVars != null) { - environment environmentVars + doFirst { + if (environmentVars != null) { + environment environmentVars + } + + // Build our command line for vagrant + def vagrantCommand = [executable, command] + if (subcommand != null) { + vagrantCommand = vagrantCommand + subcommand + } + commandLine([*vagrantCommand, boxName, *args]) + + // It'd be nice if --machine-readable were, well, nice + standardOutput = new TeeOutputStream(standardOutput, createLoggerOutputStream()) } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 2fb047e9305..9df6d36ef01 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -17,7 +17,7 @@ class VagrantTestPlugin implements Plugin<Project> { 'centos-6', 'centos-7', 'debian-8', - 'fedora-24', + 'fedora-25', 'oel-6', 'oel-7', 'opensuse-13', @@ -391,21 +391,23 @@ class VagrantTestPlugin implements Plugin<Project> { // always add a halt task for all boxes, so clean makes sure they are all shutdown Task halt = project.tasks.create("vagrant${boxTask}#halt", VagrantCommandTask) { + command 'halt' boxName box environmentVars vagrantEnvVars - args 'halt', box } stop.dependsOn(halt) Task update = project.tasks.create("vagrant${boxTask}#update", VagrantCommandTask) { + command 'box' + subcommand 'update' boxName box environmentVars vagrantEnvVars - args 'box', 'update', box dependsOn vagrantCheckVersion, virtualboxCheckVersion } update.mustRunAfter(setupBats) Task up = project.tasks.create("vagrant${boxTask}#up", VagrantCommandTask) { + command 'up' boxName box environmentVars vagrantEnvVars /* Its important that we try to reprovision the box even if it already @@ -418,7 +420,7 @@ class VagrantTestPlugin implements Plugin<Project> { vagrant's default but its possible to change that default and folks do. But the boxes that we use are unlikely to work properly with other virtualization providers. Thus the lock. */ - args 'up', box, '--provision', '--provider', 'virtualbox' + args '--provision', '--provider', 'virtualbox' /* It'd be possible to check if the box is already up here and output SKIPPED but that would require running vagrant status which is slow! 
*/ dependsOn update @@ -434,11 +436,11 @@ class VagrantTestPlugin implements Plugin<Project> { vagrantSmokeTest.dependsOn(smoke) Task packaging = project.tasks.create("vagrant${boxTask}#packagingTest", BatsOverVagrantTask) { + remoteCommand BATS_TEST_COMMAND boxName box environmentVars vagrantEnvVars dependsOn up, setupBats finalizedBy halt - command BATS_TEST_COMMAND } TaskExecutionAdapter packagingReproListener = new TaskExecutionAdapter() { @@ -461,11 +463,12 @@ class VagrantTestPlugin implements Plugin<Project> { } Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) { + command 'ssh' boxName box environmentVars vagrantEnvVars dependsOn up finalizedBy halt - args 'ssh', boxName, '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.extensions.esvagrant.formattedTestSeed}" + args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.extensions.esvagrant.formattedTestSeed}" } TaskExecutionAdapter platformReproListener = new TaskExecutionAdapter() { @Override diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index a7f01ef5528..ea25a1bad8a 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -80,6 +80,8 @@ public class Version implements Comparable<Version> { public static final Version V_5_3_2_UNRELEASED = new Version(V_5_3_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_2); public static final int V_5_4_0_ID_UNRELEASED = 5040099; public static final Version V_5_4_0_UNRELEASED = new Version(V_5_4_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0); + public static final int V_5_4_1_ID_UNRELEASED = 5040199; + public static final Version V_5_4_1_UNRELEASED = new Version(V_5_4_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_1); public static final int V_5_5_0_ID_UNRELEASED = 5050099; public static final Version V_5_5_0_UNRELEASED = new Version(V_5_5_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0); public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001; @@ -104,6 +106,8 @@ public class Version implements Comparable<Version> { return V_6_0_0_alpha1_UNRELEASED; case V_5_5_0_ID_UNRELEASED: return V_5_5_0_UNRELEASED; + case V_5_4_1_ID_UNRELEASED: + return V_5_4_1_UNRELEASED; case V_5_4_0_ID_UNRELEASED: return V_5_4_0_UNRELEASED; case V_5_3_2_ID_UNRELEASED: diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index 3ee5f56ebdc..c2fb90434e5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.internal.AliasFilter; import java.io.IOException; +import java.util.Arrays; import java.util.HashMap; import java.util.Map; @@ -117,10 +118,14 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo String index = entry.getKey(); builder.startObject(index); AliasFilter aliasFilter = entry.getValue(); - if (aliasFilter.getAliases().length > 0) { - builder.array("aliases", aliasFilter.getAliases()); - builder.field("filter"); - aliasFilter.getQueryBuilder().toXContent(builder, params); + String[] aliases = 
aliasFilter.getAliases(); + if (aliases.length > 0) { + Arrays.sort(aliases); // we want consistent ordering here and these values might be generated from a set / map + builder.array("aliases", aliases); + if (aliasFilter.getQueryBuilder() != null) { // might be null if we include non-filtering aliases + builder.field("filter"); + aliasFilter.getQueryBuilder().toXContent(builder, params); + } } builder.endObject(); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 8825a426768..20ed69ae5a9 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -83,8 +83,10 @@ public class TransportClusterSearchShardsAction extends Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices()); Map<String, AliasFilter> indicesAndFilters = new HashMap<>(); for (String index : concreteIndices) { - AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, index, request.indices()); - indicesAndFilters.put(index, aliasFilter); + final AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, index, request.indices()); + final String[] aliases = indexNameExpressionResolver.indexAliases(clusterState, index, aliasMetadata -> true, true, + request.indices()); + indicesAndFilters.put(index, new AliasFilter(aliasFilter.getQueryBuilder(), aliases)); } Set<String> nodeIds = new HashSet<>(); diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index de520ee6274..1e468624516 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.fieldcaps; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; import java.util.Map; @@ -29,7 +30,7 @@ import java.util.Map; /** * Response for {@link FieldCapabilitiesIndexRequest} requests. 
*/ -public class FieldCapabilitiesIndexResponse extends ActionResponse { +public class FieldCapabilitiesIndexResponse extends ActionResponse implements Writeable { private String indexName; private Map<String, FieldCapabilities> responseMap; @@ -41,6 +42,10 @@ public class FieldCapabilitiesIndexResponse extends ActionResponse { FieldCapabilitiesIndexResponse() { } + FieldCapabilitiesIndexResponse(StreamInput input) throws IOException { + this.readFrom(input); + } + /** * Get the index name diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index 7eab9112162..ce1ba282899 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.fieldcaps; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -38,13 +39,14 @@ import java.util.Set; import static org.elasticsearch.common.xcontent.ObjectParser.fromList; -public class FieldCapabilitiesRequest extends ActionRequest - implements IndicesRequest.Replaceable { +public final class FieldCapabilitiesRequest extends ActionRequest implements IndicesRequest.Replaceable { public static final ParseField FIELDS_FIELD = new ParseField("fields"); public static final String NAME = "field_caps_request"; private String[] indices = Strings.EMPTY_ARRAY; private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); private String[] fields = Strings.EMPTY_ARRAY; + // pkg private API mainly for cross cluster search to signal that we do multiple reductions ie. the results should not be merged + private boolean mergeResults = true; private static ObjectParser<FieldCapabilitiesRequest, Void> PARSER = new ObjectParser<>(NAME, FieldCapabilitiesRequest::new); @@ -56,16 +58,39 @@ public class FieldCapabilitiesRequest extends ActionRequest public FieldCapabilitiesRequest() {} + /** + * Returns <code>true</code> iff the results should be merged. + */ + boolean isMergeResults() { + return mergeResults; + } + + /** + * if set to <code>true</code> the response will contain only a merged view of the per index field capabilities. Otherwise only + * unmerged per index field capabilities are returned. 
+ */ + void setMergeResults(boolean mergeResults) { + this.mergeResults = mergeResults; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); fields = in.readStringArray(); + if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) { + mergeResults = in.readBoolean(); + } else { + mergeResults = true; + } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(fields); + if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) { + out.writeBoolean(mergeResults); + } } public static FieldCapabilitiesRequest parseFields(XContentParser parser) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 9ff2cf3850b..00c424e1fe7 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.fieldcaps; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.Map; /** @@ -34,9 +36,20 @@ import java.util.Map; */ public class FieldCapabilitiesResponse extends ActionResponse implements ToXContent { private Map<String, Map<String, FieldCapabilities>> responseMap; + private List<FieldCapabilitiesIndexResponse> indexResponses; FieldCapabilitiesResponse(Map<String, Map<String, FieldCapabilities>> responseMap) { + this(responseMap, Collections.emptyList()); + } + + FieldCapabilitiesResponse(List<FieldCapabilitiesIndexResponse> indexResponses) { + this(Collections.emptyMap(), indexResponses); + } + + private FieldCapabilitiesResponse(Map<String, Map<String, FieldCapabilities>> responseMap, + List<FieldCapabilitiesIndexResponse> indexResponses) { this.responseMap = responseMap; + this.indexResponses = indexResponses; } /** @@ -53,6 +66,13 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont return responseMap; } + + /** + * Returns the actual per-index field caps responses + */ + List<FieldCapabilitiesIndexResponse> getIndexResponses() { + return indexResponses; + } /** * * Get the field capabilities per type for the provided {@code field}. 
@@ -66,6 +86,11 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont super.readFrom(in); this.responseMap = in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField); + if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) { + indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); + } else { + indexResponses = Collections.emptyList(); + } } private static Map<String, FieldCapabilities> readField(StreamInput in) throws IOException { @@ -76,6 +101,10 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); + if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) { + out.writeList(indexResponses); + } + } private static void writeField(StreamOutput out, diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index a7f268eaf5d..6491b8ce4c7 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.fieldcaps; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterState; @@ -27,18 +28,27 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; -public class TransportFieldCapabilitiesAction - extends HandledTransportAction<FieldCapabilitiesRequest, FieldCapabilitiesResponse> { +public class TransportFieldCapabilitiesAction extends HandledTransportAction<FieldCapabilitiesRequest, FieldCapabilitiesResponse> { private final ClusterService clusterService; private final TransportFieldCapabilitiesIndexAction shardAction; + private final RemoteClusterService remoteClusterService; + private final TransportService transportService; @Inject public TransportFieldCapabilitiesAction(Settings settings, TransportService transportService, @@ -50,71 +60,97 @@ public class TransportFieldCapabilitiesAction super(settings, FieldCapabilitiesAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, FieldCapabilitiesRequest::new); this.clusterService = clusterService; + this.remoteClusterService = 
transportService.getRemoteClusterService(); + this.transportService = transportService; this.shardAction = shardAction; } @Override protected void doExecute(FieldCapabilitiesRequest request, final ActionListener<FieldCapabilitiesResponse> listener) { - ClusterState clusterState = clusterService.state(); - String[] concreteIndices = - indexNameExpressionResolver.concreteIndexNames(clusterState, request); - final AtomicInteger indexCounter = new AtomicInteger(); - final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length); - final AtomicReferenceArray<Object> indexResponses = - new AtomicReferenceArray<>(concreteIndices.length); - if (concreteIndices.length == 0) { + final ClusterState clusterState = clusterService.state(); + final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(request.indicesOptions(), + request.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState)); + final OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, localIndices); + final int totalNumRequest = concreteIndices.length + remoteClusterIndices.size(); + final CountDown completionCounter = new CountDown(totalNumRequest); + final List<FieldCapabilitiesIndexResponse> indexResponses = Collections.synchronizedList(new ArrayList<>()); + final Runnable onResponse = () -> { + if (completionCounter.countDown()) { + if (request.isMergeResults()) { + listener.onResponse(merge(indexResponses)); + } else { + listener.onResponse(new FieldCapabilitiesResponse(indexResponses)); + } + } + }; + if (totalNumRequest == 0) { listener.onResponse(new FieldCapabilitiesResponse()); } else { + ActionListener<FieldCapabilitiesIndexResponse> innerListener = new ActionListener<FieldCapabilitiesIndexResponse>() { + @Override + public void onResponse(FieldCapabilitiesIndexResponse result) { + indexResponses.add(result); + onResponse.run(); + } + + @Override + public void onFailure(Exception e) { + // TODO we should somehow inform the user that we failed + onResponse.run(); + } + }; for (String index : concreteIndices) { - FieldCapabilitiesIndexRequest indexRequest = - new FieldCapabilitiesIndexRequest(request.fields(), index); - shardAction.execute(indexRequest, - new ActionListener<FieldCapabilitiesIndexResponse> () { + shardAction.execute(new FieldCapabilitiesIndexRequest(request.fields(), index), innerListener); + } + + // this is the cross cluster part of this API - we force the other cluster to not merge the results but instead + // send us back all individual index results. 
+ for (Map.Entry<String, OriginalIndices> remoteIndices : remoteClusterIndices.entrySet()) { + String clusterAlias = remoteIndices.getKey(); + OriginalIndices originalIndices = remoteIndices.getValue(); + Transport.Connection connection = remoteClusterService.getConnection(remoteIndices.getKey()); + FieldCapabilitiesRequest remoteRequest = new FieldCapabilitiesRequest(); + remoteRequest.setMergeResults(false); // we need to merge on this node + remoteRequest.indicesOptions(originalIndices.indicesOptions()); + remoteRequest.indices(originalIndices.indices()); + remoteRequest.fields(request.fields()); + transportService.sendRequest(connection, FieldCapabilitiesAction.NAME, remoteRequest, TransportRequestOptions.EMPTY, + new TransportResponseHandler<FieldCapabilitiesResponse>() { @Override - public void onResponse(FieldCapabilitiesIndexResponse result) { - indexResponses.set(indexCounter.getAndIncrement(), result); - if (completionCounter.decrementAndGet() == 0) { - listener.onResponse(merge(indexResponses)); - } + public FieldCapabilitiesResponse newInstance() { + return new FieldCapabilitiesResponse(); } @Override - public void onFailure(Exception e) { - indexResponses.set(indexCounter.getAndIncrement(), e); - if (completionCounter.decrementAndGet() == 0) { - listener.onResponse(merge(indexResponses)); + public void handleResponse(FieldCapabilitiesResponse response) { + for (FieldCapabilitiesIndexResponse res : response.getIndexResponses()) { + indexResponses.add(new FieldCapabilitiesIndexResponse(RemoteClusterAware.buildRemoteIndexName(clusterAlias, + res.getIndexName()), res.get())); } + onResponse.run(); + } + + @Override + public void handleException(TransportException exp) { + onResponse.run(); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; } }); } + } } - private FieldCapabilitiesResponse merge(AtomicReferenceArray<Object> indexResponses) { + private FieldCapabilitiesResponse merge(List<FieldCapabilitiesIndexResponse> indexResponses) { Map<String, Map<String, FieldCapabilities.Builder>> responseMapBuilder = new HashMap<> (); - for (int i = 0; i < indexResponses.length(); i++) { - Object element = indexResponses.get(i); - if (element instanceof FieldCapabilitiesIndexResponse == false) { - assert element instanceof Exception; - continue; - } - FieldCapabilitiesIndexResponse response = (FieldCapabilitiesIndexResponse) element; - for (String field : response.get().keySet()) { - Map<String, FieldCapabilities.Builder> typeMap = responseMapBuilder.get(field); - if (typeMap == null) { - typeMap = new HashMap<> (); - responseMapBuilder.put(field, typeMap); - } - FieldCapabilities fieldCap = response.getField(field); - FieldCapabilities.Builder builder = typeMap.get(fieldCap.getType()); - if (builder == null) { - builder = new FieldCapabilities.Builder(field, fieldCap.getType()); - typeMap.put(fieldCap.getType(), builder); - } - builder.add(response.getIndexName(), - fieldCap.isSearchable(), fieldCap.isAggregatable()); - } + for (FieldCapabilitiesIndexResponse response : indexResponses) { + innerMerge(responseMapBuilder, response.getIndexName(), response.get()); } Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>(); @@ -131,4 +167,16 @@ public class TransportFieldCapabilitiesAction return new FieldCapabilitiesResponse(responseMap); } + + private void innerMerge(Map<String, Map<String, FieldCapabilities.Builder>> responseMapBuilder, String indexName, + Map<String, FieldCapabilities> map) { + for (Map.Entry<String, FieldCapabilities> entry : 
map.entrySet()) { + final String field = entry.getKey(); + final FieldCapabilities fieldCap = entry.getValue(); + Map<String, FieldCapabilities.Builder> typeMap = responseMapBuilder.computeIfAbsent(field, f -> new HashMap<>()); + FieldCapabilities.Builder builder = typeMap.computeIfAbsent(fieldCap.getType(), key -> new FieldCapabilities.Builder(field, + key)); + builder.add(indexName, fieldCap.isSearchable(), fieldCap.isAggregatable()); + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index 5bab7276860..b9e6f56b6d7 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -41,34 +41,19 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; -public class TransportFieldCapabilitiesIndexAction - extends TransportSingleShardAction<FieldCapabilitiesIndexRequest, +public class TransportFieldCapabilitiesIndexAction extends TransportSingleShardAction<FieldCapabilitiesIndexRequest, FieldCapabilitiesIndexResponse> { private static final String ACTION_NAME = FieldCapabilitiesAction.NAME + "[index]"; - protected final ClusterService clusterService; private final IndicesService indicesService; @Inject - public TransportFieldCapabilitiesIndexAction(Settings settings, - ClusterService clusterService, - TransportService transportService, - IndicesService indicesService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver - indexNameExpressionResolver) { - super(settings, - ACTION_NAME, - threadPool, - clusterService, - transportService, - actionFilters, - indexNameExpressionResolver, - FieldCapabilitiesIndexRequest::new, - ThreadPool.Names.MANAGEMENT); - this.clusterService = clusterService; + public TransportFieldCapabilitiesIndexAction(Settings settings, ClusterService clusterService, TransportService transportService, + IndicesService indicesService, ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, + FieldCapabilitiesIndexRequest::new, ThreadPool.Names.MANAGEMENT); this.indicesService = indicesService; } @@ -86,11 +71,8 @@ public class TransportFieldCapabilitiesIndexAction } @Override - protected FieldCapabilitiesIndexResponse shardOperation( - final FieldCapabilitiesIndexRequest request, - ShardId shardId) { - MapperService mapperService = - indicesService.indexServiceSafe(shardId.getIndex()).mapperService(); + protected FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesIndexRequest request, ShardId shardId) { + MapperService mapperService = indicesService.indexServiceSafe(shardId.getIndex()).mapperService(); Set<String> fieldNames = new HashSet<>(); for (String field : request.fields()) { fieldNames.addAll(mapperService.simpleMatchToIndexNames(field)); @@ -98,11 +80,10 @@ public class TransportFieldCapabilitiesIndexAction Map<String, FieldCapabilities> responseMap = new HashMap<>(); for (String field : fieldNames) { MappedFieldType ft = mapperService.fullName(field); - FieldCapabilities fieldCap = new FieldCapabilities(field, - ft.typeName(), - ft.isSearchable(), - ft.isAggregatable()); - responseMap.put(field, fieldCap); + if 
(ft != null) { + FieldCapabilities fieldCap = new FieldCapabilities(field, ft.typeName(), ft.isSearchable(), ft.isAggregatable()); + responseMap.put(field, fieldCap); + } } return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), responseMap); } @@ -113,9 +94,7 @@ public class TransportFieldCapabilitiesIndexAction } @Override - protected ClusterBlockException checkRequestBlock(ClusterState state, - InternalRequest request) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, - request.concreteIndex()); + protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, request.concreteIndex()); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 91a23bf6a6f..f65597a966b 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -178,35 +178,17 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, final SearchTimeProvider timeProvider = new SearchTimeProvider(absoluteStartMillis, relativeStartNanos, System::nanoTime); - final OriginalIndices localIndices; - final Map<String, OriginalIndices> remoteClusterIndices; - final ClusterState clusterState = clusterService.state(); - if (remoteClusterService.isCrossClusterSearchEnabled()) { - final Map<String, List<String>> groupedIndices = remoteClusterService.groupClusterIndices(searchRequest.indices(), - // empty string is not allowed - idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState)); - List<String> remove = groupedIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - String[] indices = remove == null ? 
Strings.EMPTY_ARRAY : remove.toArray(new String[remove.size()]); - localIndices = new OriginalIndices(indices, searchRequest.indicesOptions()); - Map<String, OriginalIndices> originalIndicesMap = new HashMap<>(); - for (Map.Entry<String, List<String>> entry : groupedIndices.entrySet()) { - String clusterAlias = entry.getKey(); - List<String> originalIndices = entry.getValue(); - originalIndicesMap.put(clusterAlias, - new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), searchRequest.indicesOptions())); - } - remoteClusterIndices = Collections.unmodifiableMap(originalIndicesMap); - } else { - remoteClusterIndices = Collections.emptyMap(); - localIndices = new OriginalIndices(searchRequest); - } + final ClusterState clusterState = clusterService.state(); + final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(searchRequest.indicesOptions(), + searchRequest.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState)); + OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); if (remoteClusterIndices.isEmpty()) { executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(), (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener); } else { - remoteClusterService.collectSearchShards(searchRequest, remoteClusterIndices, - ActionListener.wrap((searchShardsResponses) -> { + remoteClusterService.collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(), searchRequest.routing(), + remoteClusterIndices, ActionListener.wrap((searchShardsResponses) -> { List<SearchShardIterator> remoteShardIterators = new ArrayList<>(); Map<String, AliasFilter> remoteAliasFilters = new HashMap<>(); BiFunction<String, String, DiscoveryNode> clusterNodeLookup = processRemoteShards(searchShardsResponses, @@ -230,28 +212,31 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, for (DiscoveryNode remoteNode : searchShardsResponse.getNodes()) { idToDiscoveryNode.put(remoteNode.getId(), remoteNode); } - Map<String, AliasFilter> indicesAndFilters = searchShardsResponse.getIndicesAndFilters(); + final Map<String, AliasFilter> indicesAndFilters = searchShardsResponse.getIndicesAndFilters(); for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) { //add the cluster name to the remote index names for indices disambiguation //this ends up in the hits returned with the search response ShardId shardId = clusterSearchShardsGroup.getShardId(); Index remoteIndex = shardId.getIndex(); - Index index = new Index(clusterAlias + RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR + remoteIndex.getName(), + Index index = new Index(RemoteClusterAware.buildRemoteIndexName(clusterAlias, remoteIndex.getName()), remoteIndex.getUUID()); - OriginalIndices originalIndices = remoteIndicesByCluster.get(clusterAlias); - assert originalIndices != null; - SearchShardIterator shardIterator = new SearchShardIterator(clusterAlias, new ShardId(index, shardId.getId()), - Arrays.asList(clusterSearchShardsGroup.getShards()), originalIndices); - remoteShardIterators.add(shardIterator); - AliasFilter aliasFilter; + final AliasFilter aliasFilter; if (indicesAndFilters == null) { - aliasFilter = new AliasFilter(null, Strings.EMPTY_ARRAY); + aliasFilter = AliasFilter.EMPTY; } else { aliasFilter = indicesAndFilters.get(shardId.getIndexName()); - assert aliasFilter != null; + assert aliasFilter 
!= null : "alias filter must not be null for index: " + shardId.getIndex(); } + String[] aliases = aliasFilter.getAliases(); + String[] finalIndices = aliases.length == 0 ? new String[] {shardId.getIndexName()} : aliases; // here we have to map the filters to the UUID since from now on we use the uuid for the lookup aliasFilterMap.put(remoteIndex.getUUID(), aliasFilter); + final OriginalIndices originalIndices = remoteIndicesByCluster.get(clusterAlias); + assert originalIndices != null : "original indices are null for clusterAlias: " + clusterAlias; + SearchShardIterator shardIterator = new SearchShardIterator(clusterAlias, new ShardId(index, shardId.getId()), + Arrays.asList(clusterSearchShardsGroup.getShards()), new OriginalIndices(finalIndices, + originalIndices.indicesOptions())); + remoteShardIterators.add(shardIterator); } } return (clusterAlias, nodeId) -> { diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 716472c76f0..60395210165 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -58,9 +58,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -92,19 +90,22 @@ public class ClusterModule extends AbstractModule { public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING = new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope); - private final Settings settings; private final ClusterService clusterService; private final IndexNameExpressionResolver indexNameExpressionResolver; + private final AllocationDeciders allocationDeciders; + private final AllocationService allocationService; // pkg private for tests - final Collection<AllocationDecider> allocationDeciders; + final Collection<AllocationDecider> deciderList; final ShardsAllocator shardsAllocator; - public ClusterModule(Settings settings, ClusterService clusterService, List<ClusterPlugin> clusterPlugins) { - this.settings = settings; - this.allocationDeciders = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); + public ClusterModule(Settings settings, ClusterService clusterService, List<ClusterPlugin> clusterPlugins, + ClusterInfoService clusterInfoService) { + this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); + this.allocationDeciders = new AllocationDeciders(settings, deciderList); this.shardsAllocator = createShardsAllocator(settings, clusterService.getClusterSettings(), clusterPlugins); this.clusterService = clusterService; - indexNameExpressionResolver = new IndexNameExpressionResolver(settings); + this.indexNameExpressionResolver = new IndexNameExpressionResolver(settings); + this.allocationService = new AllocationService(settings, allocationDeciders, shardsAllocator, clusterInfoService); } @@ -213,10 +214,14 @@ public class ClusterModule extends 
AbstractModule { "ShardsAllocator factory for [" + allocatorName + "] returned null"); } + public AllocationService getAllocationService() { + return allocationService; + } + @Override protected void configure() { bind(GatewayAllocator.class).asEagerSingleton(); - bind(AllocationService.class).asEagerSingleton(); + bind(AllocationService.class).toInstance(allocationService); bind(ClusterService.class).toInstance(clusterService); bind(NodeConnectionsService.class).asEagerSingleton(); bind(MetaDataCreateIndexService.class).asEagerSingleton(); @@ -233,7 +238,7 @@ public class ClusterModule extends AbstractModule { bind(NodeMappingRefreshAction.class).asEagerSingleton(); bind(MappingUpdatedAction.class).asEagerSingleton(); bind(TaskResultsService.class).asEagerSingleton(); - bind(AllocationDeciders.class).toInstance(new AllocationDeciders(settings, allocationDeciders)); + bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 168fe2ad7f2..d4c6ec587db 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -48,6 +48,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.function.Predicate; import java.util.stream.Collectors; public class IndexNameExpressionResolver extends AbstractComponent { @@ -268,8 +269,19 @@ public class IndexNameExpressionResolver extends AbstractComponent { * the index itself - null is returned. Returns <tt>null</tt> if no filtering is required. */ public String[] filteringAliases(ClusterState state, String index, String... expressions) { + return indexAliases(state, index, AliasMetaData::filteringRequired, false, expressions); + } + + /** + * Iterates through the list of indices and selects the effective list of required aliases for the + * given index. + * <p>Only aliases where the given predicate tests successfully are returned. If the indices list contains a non-required reference to + * the index itself - null is returned. Returns <tt>null</tt> if no filtering is required. + */ + public String[] indexAliases(ClusterState state, String index, Predicate<AliasMetaData> requiredAlias, boolean skipIdentity, + String... expressions) { // expand the aliases wildcard - List<String> resolvedExpressions = expressions != null ? Arrays.asList(expressions) : Collections.<String>emptyList(); + List<String> resolvedExpressions = expressions != null ? 
Arrays.asList(expressions) : Collections.emptyList(); Context context = new Context(state, IndicesOptions.lenientExpandOpen(), true); for (ExpressionResolver expressionResolver : expressionResolvers) { resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions); @@ -278,54 +290,50 @@ public class IndexNameExpressionResolver extends AbstractComponent { if (isAllIndices(resolvedExpressions)) { return null; } + final IndexMetaData indexMetaData = state.metaData().getIndices().get(index); + if (indexMetaData == null) { + // Shouldn't happen + throw new IndexNotFoundException(index); + } // optimize for the most common single index/alias scenario if (resolvedExpressions.size() == 1) { String alias = resolvedExpressions.get(0); - IndexMetaData indexMetaData = state.metaData().getIndices().get(index); - if (indexMetaData == null) { - // Shouldn't happen - throw new IndexNotFoundException(index); - } + AliasMetaData aliasMetaData = indexMetaData.getAliases().get(alias); - boolean filteringRequired = aliasMetaData != null && aliasMetaData.filteringRequired(); - if (!filteringRequired) { + if (aliasMetaData == null || requiredAlias.test(aliasMetaData) == false) { return null; } return new String[]{alias}; } - List<String> filteringAliases = null; + List<String> aliases = null; for (String alias : resolvedExpressions) { if (alias.equals(index)) { - return null; + if (skipIdentity) { + continue; + } else { + return null; + } } - - IndexMetaData indexMetaData = state.metaData().getIndices().get(index); - if (indexMetaData == null) { - // Shouldn't happen - throw new IndexNotFoundException(index); - } - AliasMetaData aliasMetaData = indexMetaData.getAliases().get(alias); // Check that this is an alias for the current index // Otherwise - skip it if (aliasMetaData != null) { - boolean filteringRequired = aliasMetaData.filteringRequired(); - if (filteringRequired) { - // If filtering required - add it to the list of filters - if (filteringAliases == null) { - filteringAliases = new ArrayList<>(); + if (requiredAlias.test(aliasMetaData)) { + // If required - add it to the list of aliases + if (aliases == null) { + aliases = new ArrayList<>(); } - filteringAliases.add(alias); + aliases.add(alias); } else { - // If not, we have a non filtering alias for this index - no filtering needed + // If not, we have a non-required alias for this index - no further checking needed return null; } } } - if (filteringAliases == null) { + if (aliases == null) { return null; } - return filteringAliases.toArray(new String[filteringAliases.size()]); + return aliases.toArray(new String[aliases.size()]); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 0e8435d0f8e..caafb82c657 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -238,11 +238,7 @@ public class DiscoveryNode implements Writeable, ToXContent { int rolesSize = in.readVInt(); this.roles = EnumSet.noneOf(Role.class); for (int i = 0; i < rolesSize; i++) { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= Role.values().length) { - throw new IOException("Unknown Role ordinal [" + ordinal + "]"); - } - this.roles.add(Role.values()[ordinal]); + this.roles.add(in.readEnum(Role.class)); } this.version = Version.readVersion(in); } @@ -262,7 +258,7 @@ public class DiscoveryNode implements Writeable, ToXContent { }
out.writeVInt(roles.size()); for (Role role : roles) { - out.writeVInt(role.ordinal()); + out.writeEnum(role); } Version.writeVersion(version, out); } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 8974c8a4a9a..6b0f8bfba2a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -37,7 +37,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; @@ -61,20 +60,29 @@ import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NOD public class AllocationService extends AbstractComponent { private final AllocationDeciders allocationDeciders; - private final GatewayAllocator gatewayAllocator; + private GatewayAllocator gatewayAllocator; private final ShardsAllocator shardsAllocator; private final ClusterInfoService clusterInfoService; - @Inject - public AllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator, + public AllocationService(Settings settings, AllocationDeciders allocationDeciders, + GatewayAllocator gatewayAllocator, + ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) { + this(settings, allocationDeciders, shardsAllocator, clusterInfoService); + setGatewayAllocator(gatewayAllocator); + } + + public AllocationService(Settings settings, AllocationDeciders allocationDeciders, ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) { super(settings); this.allocationDeciders = allocationDeciders; - this.gatewayAllocator = gatewayAllocator; this.shardsAllocator = shardsAllocator; this.clusterInfoService = clusterInfoService; } + public void setGatewayAllocator(GatewayAllocator gatewayAllocator) { + this.gatewayAllocator = gatewayAllocator; + } + /** * Applies the started shards. Note, only initializing ShardRouting instances that exist in the routing table should be * provided as parameter and no duplicates should be contained. 
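For orientation, the AllocationService change above (together with the GatewayAllocator change further down in this patch) replaces constructor injection of the GatewayAllocator with a setter, breaking the circular dependency between the two components. A minimal sketch of the resulting wiring, assuming the bootstrap variables (settings, clusterService, routingService, startedAction, storeAction, allocationDeciders, shardsAllocator, clusterInfoService) already exist; the constructor and setter signatures are the ones introduced by this patch, everything else is illustrative only:

    // ClusterModule now builds the AllocationService eagerly, without a GatewayAllocator
    AllocationService allocationService =
        new AllocationService(settings, allocationDeciders, shardsAllocator, clusterInfoService);

    // The GatewayAllocator is created later (it now takes ClusterService and RoutingService directly)
    // and attached through the new setter instead of the removed @Inject constructor.
    GatewayAllocator gatewayAllocator =
        new GatewayAllocator(settings, clusterService, routingService, startedAction, storeAction);
    allocationService.setGatewayAllocator(gatewayAllocator);

Where setGatewayAllocator is actually invoked lies outside the hunks shown here; the sketch only illustrates the two-phase construction the new constructors allow.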
diff --git a/core/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java b/core/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java index e9966834a01..e83e18ce432 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java +++ b/core/src/main/java/org/elasticsearch/common/geo/ShapeRelation.java @@ -44,16 +44,12 @@ public enum ShapeRelation implements Writeable { } public static ShapeRelation readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown ShapeRelation ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(ShapeRelation.class); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(ordinal()); + out.writeEnum(this); } public static ShapeRelation getRelationByName(String name) { diff --git a/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java b/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java index acac5fd6690..0b4f640fd28 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java +++ b/core/src/main/java/org/elasticsearch/common/geo/SpatialStrategy.java @@ -40,16 +40,12 @@ public enum SpatialStrategy implements Writeable { } public static SpatialStrategy readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown SpatialStrategy ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(SpatialStrategy.class); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(ordinal()); + out.writeEnum(this); } public static SpatialStrategy fromString(String strategyName) { diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 0efb99a1fab..4681af3392e 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -901,6 +901,18 @@ public abstract class StreamInput extends InputStream { return builder; } + /** + * Reads an enum with type E that was serialized based on the value of its ordinal + */ + public <E extends Enum<E>> E readEnum(Class<E> enumClass) throws IOException { + int ordinal = readVInt(); + E[] values = enumClass.getEnumConstants(); + if (ordinal < 0 || ordinal >= values.length) { + throw new IOException("Unknown " + enumClass.getSimpleName() + " ordinal [" + ordinal + "]"); + } + return values[ordinal]; + } + public static StreamInput wrap(byte[] bytes) { return wrap(bytes, 0, bytes.length); } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 4d57e7c1b88..5816a9fd469 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -936,4 +936,11 @@ public abstract class StreamOutput extends OutputStream { } } + /** + * Writes an enum with type E that is serialized based on the value of its ordinal + */ + public <E extends Enum<E>> void writeEnum(E enumValue) throws IOException { + writeVInt(enumValue.ordinal()); + } + } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java 
b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index acf3f9ffdf8..60673777546 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.lucene.search; import org.apache.lucene.index.Term; import org.apache.lucene.queries.ExtendedCommonTermsQuery; -import org.apache.lucene.search.AutomatonQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; @@ -31,9 +30,6 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.automaton.Automata; -import org.apache.lucene.util.automaton.Automaton; -import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.mapper.TypeFieldMapper; @@ -42,29 +38,6 @@ import java.util.regex.Pattern; public class Queries { - private static final Automaton NON_NESTED_TYPE_AUTOMATON; - static { - Automaton nestedTypeAutomaton = Operations.concatenate( - Automata.makeString("__"), - Automata.makeAnyString()); - NON_NESTED_TYPE_AUTOMATON = Operations.complement(nestedTypeAutomaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES); - } - - // We use a custom class rather than AutomatonQuery directly in order to - // have a better toString - private static class NonNestedQuery extends AutomatonQuery { - - NonNestedQuery() { - super(new Term(TypeFieldMapper.NAME), NON_NESTED_TYPE_AUTOMATON); - } - - @Override - public String toString(String field) { - return "_type:[^_].*"; - } - - } - public static Query newMatchAllQuery() { return new MatchAllDocsQuery(); } @@ -79,9 +52,11 @@ public class Queries { } public static Query newNonNestedFilter() { - // we use this automaton query rather than a negation of newNestedFilter - // since purely negative queries against high-cardinality clauses are costly - return new NonNestedQuery(); + // TODO: this is slow, make it a positive query + return new BooleanQuery.Builder() + .add(new MatchAllDocsQuery(), Occur.FILTER) + .add(newNestedFilter(), Occur.MUST_NOT) + .build(); } public static BooleanQuery filtered(@Nullable Query query, @Nullable Query filter) { diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java index e8bd2887998..90d110c3804 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java @@ -143,15 +143,11 @@ public enum CombineFunction implements Writeable { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.ordinal()); + out.writeEnum(this); } public static CombineFunction readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown CombineFunction ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(CombineFunction.class); } public static CombineFunction fromString(String combineFunction) { diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java 
b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index e225df040ab..1978c0acf0d 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -191,15 +191,11 @@ public class FieldValueFactorFunction extends ScoreFunction { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.ordinal()); + out.writeEnum(this); } public static Modifier readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown Modifier ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(Modifier.class); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index abf145406c5..2f2a70537c0 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -81,15 +81,11 @@ public class FiltersFunctionScoreQuery extends Query { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.ordinal()); + out.writeEnum(this); } public static ScoreMode readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown ScoreMode ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(ScoreMode.class); } public static ScoreMode fromString(String scoreMode) { diff --git a/core/src/main/java/org/elasticsearch/common/util/ArrayUtils.java b/core/src/main/java/org/elasticsearch/common/util/ArrayUtils.java index de23663b3e9..20c12d564da 100644 --- a/core/src/main/java/org/elasticsearch/common/util/ArrayUtils.java +++ b/core/src/main/java/org/elasticsearch/common/util/ArrayUtils.java @@ -84,5 +84,4 @@ public class ArrayUtils { System.arraycopy(other, 0, target, one.length, other.length); return target; } - } diff --git a/core/src/main/java/org/elasticsearch/discovery/Discovery.java b/core/src/main/java/org/elasticsearch/discovery/Discovery.java index 4c28b51bc4f..7f68f417fcb 100644 --- a/core/src/main/java/org/elasticsearch/discovery/Discovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/Discovery.java @@ -23,7 +23,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.io.stream.StreamInput; @@ -37,12 +36,6 @@ import java.io.IOException; */ public interface Discovery extends LifecycleComponent { - /** - * Another hack to solve dep injection problem..., note, this will be called before - * any start is called. - */ - void setAllocationService(AllocationService allocationService); - /** * Publish all the changes to the cluster from the master (can be called just by the master). 
The publish * process should apply this state to the master as well! diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 43fccab1a28..b2367c6e953 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -58,7 +59,8 @@ public class DiscoveryModule { public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, MasterService masterService, - ClusterApplier clusterApplier, ClusterSettings clusterSettings, List<DiscoveryPlugin> plugins) { + ClusterApplier clusterApplier, ClusterSettings clusterSettings, List<DiscoveryPlugin> plugins, + AllocationService allocationService) { final UnicastHostsProvider hostsProvider; Map<String, Supplier<UnicastHostsProvider>> hostProviders = new HashMap<>(); @@ -83,12 +85,12 @@ public class DiscoveryModule { Map<String, Supplier<Discovery>> discoveryTypes = new HashMap<>(); discoveryTypes.put("zen", () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, hostsProvider)); + clusterSettings, hostsProvider, allocationService)); discoveryTypes.put("tribe", () -> new TribeDiscovery(settings, transportService, clusterApplier)); discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, clusterApplier)); for (DiscoveryPlugin plugin : plugins) { plugin.getDiscoveryTypes(threadPool, transportService, namedWriteableRegistry, - masterService, clusterApplier, clusterSettings, hostsProvider).entrySet().forEach(entry -> { + masterService, clusterApplier, clusterSettings, hostsProvider, allocationService).entrySet().forEach(entry -> { if (discoveryTypes.put(entry.getKey(), entry.getValue()) != null) { throw new IllegalArgumentException("Cannot register discovery type [" + entry.getKey() + "] twice"); } diff --git a/core/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java index d7275e7be91..11526961797 100644 --- a/core/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Settings; @@ -59,11 +58,6 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D this.clusterApplier = clusterApplier; } - @Override - public void setAllocationService(final AllocationService allocationService) { - - } - @Override public synchronized void publish(final 
ClusterChangedEvent event, final AckListener ackListener) { diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index fe94cea597d..d08b148554b 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -109,7 +109,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private final TransportService transportService; private final MasterService masterService; - private AllocationService allocationService; private final ClusterName clusterName; private final DiscoverySettings discoverySettings; protected final ZenPing zenPing; // protected to allow tests access @@ -140,9 +139,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover private final JoinThreadControl joinThreadControl; - // must initialized in doStart(), when we have the allocationService set - private volatile NodeJoinController nodeJoinController; - private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; + private final NodeJoinController nodeJoinController; + private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; private final ClusterApplier clusterApplier; private final AtomicReference<ClusterState> state; // last committed cluster state @@ -151,7 +149,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider) { + ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, AllocationService allocationService) { super(settings); this.masterService = masterService; this.clusterApplier = clusterApplier; @@ -213,6 +211,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover this.membership = new MembershipAction(settings, transportService, new MembershipListener()); this.joinThreadControl = new JoinThreadControl(); + this.nodeJoinController = new NodeJoinController(masterService, allocationService, electMaster, settings); + this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::submitRejoin, logger); + transportService.registerRequestHandler( DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler()); } @@ -223,11 +224,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover return new UnicastZenPing(settings, threadPool, transportService, hostsProvider); } - @Override - public void setAllocationService(AllocationService allocationService) { - this.allocationService = allocationService; - } - @Override protected void doStart() { DiscoveryNode localNode = transportService.getLocalNode(); @@ -239,8 +235,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover joinThreadControl.start(); } zenPing.start(this); - this.nodeJoinController = new NodeJoinController(masterService, allocationService, electMaster, settings); - this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::submitRejoin, logger); } @Override diff --git a/core/src/main/java/org/elasticsearch/env/ESFileStore.java 
b/core/src/main/java/org/elasticsearch/env/ESFileStore.java index 8ac6cf8a02a..bba1e1e6096 100644 --- a/core/src/main/java/org/elasticsearch/env/ESFileStore.java +++ b/core/src/main/java/org/elasticsearch/env/ESFileStore.java @@ -20,7 +20,6 @@ package org.elasticsearch.env; import org.apache.lucene.util.Constants; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; @@ -43,24 +42,15 @@ import java.util.List; class ESFileStore extends FileStore { /** Underlying filestore */ final FileStore in; - /** Cached result of Lucene's {@code IOUtils.spins} on path. */ - final Boolean spins; - int majorDeviceNumber; - int minorDeviceNumber; + private int majorDeviceNumber; + private int minorDeviceNumber; @SuppressForbidden(reason = "tries to determine if disk is spinning") // TODO: move PathUtils to be package-private here instead of // public+forbidden api! - ESFileStore(FileStore in) { + ESFileStore(final FileStore in) { this.in = in; - Boolean spins; - // Lucene's IOUtils.spins only works on Linux today: if (Constants.LINUX) { - try { - spins = IOUtils.spins(PathUtils.get(getMountPointLinux(in))); - } catch (Exception e) { - spins = null; - } try { final List<String> lines = Files.readAllLines(PathUtils.get("/proc/self/mountinfo")); for (final String line : lines) { @@ -70,20 +60,21 @@ class ESFileStore extends FileStore { final String[] deviceNumbers = fields[2].split(":"); majorDeviceNumber = Integer.parseInt(deviceNumbers[0]); minorDeviceNumber = Integer.parseInt(deviceNumbers[1]); + break; } } - } catch (Exception e) { + } catch (final Exception e) { majorDeviceNumber = -1; minorDeviceNumber = -1; } } else { - spins = null; + majorDeviceNumber = -1; + minorDeviceNumber = -1; } - this.spins = spins; } - + // these are hacks that are not guaranteed - private static String getMountPointLinux(FileStore store) { + private static String getMountPointLinux(final FileStore store) { String desc = store.toString(); int index = desc.lastIndexOf(" ("); if (index != -1) { @@ -92,109 +83,6 @@ class ESFileStore extends FileStore { return desc; } } - - /** - * Files.getFileStore(Path) useless here! Don't complain, just try it yourself. - */ - @SuppressForbidden(reason = "works around the bugs") - static FileStore getMatchingFileStore(Path path, FileStore fileStores[]) throws IOException { - if (Constants.WINDOWS) { - return getFileStoreWindows(path, fileStores); - } - - final FileStore store; - try { - store = Files.getFileStore(path); - } catch (IOException unexpected) { - // give a better error message if a filestore cannot be retrieved from inside a FreeBSD jail. - if (Constants.FREE_BSD) { - throw new IOException("Unable to retrieve mount point data for " + path + - ". If you are running within a jail, set enforce_statfs=1. See jail(8)", unexpected); - } else { - throw unexpected; - } - } - - try { - String mount = getMountPointLinux(store); - FileStore sameMountPoint = null; - for (FileStore fs : fileStores) { - if (mount.equals(getMountPointLinux(fs))) { - if (sameMountPoint == null) { - sameMountPoint = fs; - } else { - // more than one filesystem has the same mount point; something is wrong! 
- // fall back to crappy one we got from Files.getFileStore - return store; - } - } - } - - if (sameMountPoint != null) { - // ok, we found only one, use it: - return sameMountPoint; - } else { - // fall back to crappy one we got from Files.getFileStore - return store; - } - } catch (Exception e) { - // ignore - } - - // fall back to crappy one we got from Files.getFileStore - return store; - } - - /** - * remove this code and just use getFileStore for windows on java 9 - * works around https://bugs.openjdk.java.net/browse/JDK-8034057 - */ - @SuppressForbidden(reason = "works around https://bugs.openjdk.java.net/browse/JDK-8034057") - static FileStore getFileStoreWindows(Path path, FileStore fileStores[]) throws IOException { - assert Constants.WINDOWS; - - try { - return Files.getFileStore(path); - } catch (FileSystemException possibleBug) { - final char driveLetter; - // look for a drive letter to see if its the SUBST bug, - // it might be some other type of path, like a windows share - // if something goes wrong, we just deliver the original exception - try { - String root = path.toRealPath().getRoot().toString(); - if (root.length() < 2) { - throw new RuntimeException("root isn't a drive letter: " + root); - } - driveLetter = Character.toLowerCase(root.charAt(0)); - if (Character.isAlphabetic(driveLetter) == false || root.charAt(1) != ':') { - throw new RuntimeException("root isn't a drive letter: " + root); - } - } catch (Exception checkFailed) { - // something went wrong, - possibleBug.addSuppressed(checkFailed); - throw possibleBug; - } - - // we have a drive letter: the hack begins!!!!!!!! - try { - // we have no choice but to parse toString of all stores and find the matching drive letter - for (FileStore store : fileStores) { - String toString = store.toString(); - int length = toString.length(); - if (length > 3 && toString.endsWith(":)") && toString.charAt(length - 4) == '(') { - if (Character.toLowerCase(toString.charAt(length - 3)) == driveLetter) { - return store; - } - } - } - throw new RuntimeException("no filestores matched"); - } catch (Exception weTried) { - IOException newException = new IOException("Unable to retrieve filestore for '" + path + "', tried matching against " + Arrays.toString(fileStores), weTried); - newException.addSuppressed(possibleBug); - throw newException; - } - } - } @Override public String name() { @@ -263,8 +151,6 @@ class ESFileStore extends FileStore { @Override public Object getAttribute(String attribute) throws IOException { switch(attribute) { - // for the device - case "lucene:spins": return spins; // for the partition case "lucene:major_device_number": return majorDeviceNumber; case "lucene:minor_device_number": return minorDeviceNumber; diff --git a/core/src/main/java/org/elasticsearch/env/Environment.java b/core/src/main/java/org/elasticsearch/env/Environment.java index f431a7f646e..0d30f7b576c 100644 --- a/core/src/main/java/org/elasticsearch/env/Environment.java +++ b/core/src/main/java/org/elasticsearch/env/Environment.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; @@ -35,7 +36,9 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Objects; 
import java.util.function.Function; @@ -97,22 +100,6 @@ public class Environment { /** Path to the temporary file directory used by the JDK */ private final Path tmpFile = PathUtils.get(System.getProperty("java.io.tmpdir")); - /** List of filestores on the system */ - private static final FileStore[] fileStores; - - /** - * We have to do this in clinit instead of init, because ES code is pretty messy, - * and makes these environments, throws them away, makes them again, etc. - */ - static { - // gather information about filesystems - ArrayList<FileStore> allStores = new ArrayList<>(); - for (FileStore store : PathUtils.getDefaultFileSystem().getFileStores()) { - allStores.add(new ESFileStore(store)); - } - fileStores = allStores.toArray(new ESFileStore[allStores.size()]); - } - public Environment(Settings settings) { final Path homeFile; if (PATH_HOME_SETTING.exists(settings)) { @@ -331,24 +318,8 @@ public class Environment { return tmpFile; } - /** - * Looks up the filestore associated with a Path. - * <p> - * This is an enhanced version of {@link Files#getFileStore(Path)}: - * <ul> - * <li>On *nix systems, the store returned for the root filesystem will contain - * the actual filesystem type (e.g. {@code ext4}) instead of {@code rootfs}. - * <li>On some systems, the custom attribute {@code lucene:spins} is supported - * via the {@link FileStore#getAttribute(String)} method. - * <li>Only requires the security permissions of {@link Files#getFileStore(Path)}, - * no permissions to the actual mount point are required. - * <li>Exception handling has the same semantics as {@link Files#getFileStore(Path)}. - * <li>Works around https://bugs.openjdk.java.net/browse/JDK-8034057. - * <li>Gives a better exception when filestore cannot be retrieved from inside a FreeBSD jail. - * </ul> - */ - public static FileStore getFileStore(Path path) throws IOException { - return ESFileStore.getMatchingFileStore(path, fileStores); + public static FileStore getFileStore(final Path path) throws IOException { + return new ESFileStore(Files.getFileStore(path)); } /** diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index dec59f97f42..a24c3591374 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -94,9 +94,6 @@ public final class NodeEnvironment implements Closeable { public final Path indicesPath; /** Cached FileStore from path */ public final FileStore fileStore; - /** Cached result of Lucene's {@code IOUtils.spins} on path. This is a trilean value: null means we could not determine it (we are - * not running on Linux, or we hit an exception trying), True means the device possibly spins and False means it does not. 
*/ - public final Boolean spins; public final int majorDeviceNumber; public final int minorDeviceNumber; @@ -106,11 +103,9 @@ public final class NodeEnvironment implements Closeable { this.indicesPath = path.resolve(INDICES_FOLDER); this.fileStore = Environment.getFileStore(path); if (fileStore.supportsFileAttributeView("lucene")) { - this.spins = (Boolean) fileStore.getAttribute("lucene:spins"); this.majorDeviceNumber = (int) fileStore.getAttribute("lucene:major_device_number"); this.minorDeviceNumber = (int) fileStore.getAttribute("lucene:minor_device_number"); } else { - this.spins = null; this.majorDeviceNumber = -1; this.minorDeviceNumber = -1; } @@ -136,9 +131,13 @@ public final class NodeEnvironment implements Closeable { public String toString() { return "NodePath{" + "path=" + path + - ", spins=" + spins + + ", indicesPath=" + indicesPath + + ", fileStore=" + fileStore + + ", majorDeviceNumber=" + majorDeviceNumber + + ", minorDeviceNumber=" + minorDeviceNumber + '}'; } + } private final NodePath[] nodePaths; @@ -304,15 +303,6 @@ public final class NodeEnvironment implements Closeable { for (NodePath nodePath : nodePaths) { sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath()); - String spinsDesc; - if (nodePath.spins == null) { - spinsDesc = "unknown"; - } else if (nodePath.spins) { - spinsDesc = "possibly"; - } else { - spinsDesc = "no"; - } - FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath); sb.append(", free_space [") .append(fsPath.getFree()) @@ -320,8 +310,6 @@ public final class NodeEnvironment implements Closeable { .append(fsPath.getAvailable()) .append("], total_space [") .append(fsPath.getTotal()) - .append("], spins? [") - .append(spinsDesc) .append("], mount [") .append(fsPath.getMount()) .append("], type [") @@ -332,7 +320,6 @@ public final class NodeEnvironment implements Closeable { } else if (logger.isInfoEnabled()) { FsInfo.Path totFSPath = new FsInfo.Path(); Set<String> allTypes = new HashSet<>(); - Set<String> allSpins = new HashSet<>(); Set<String> allMounts = new HashSet<>(); for (NodePath nodePath : nodePaths) { FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath); @@ -343,21 +330,13 @@ public final class NodeEnvironment implements Closeable { if (type != null) { allTypes.add(type); } - Boolean spins = fsPath.getSpins(); - if (spins == null) { - allSpins.add("unknown"); - } else if (spins.booleanValue()) { - allSpins.add("possibly"); - } else { - allSpins.add("no"); - } totFSPath.add(fsPath); } } // Just log a 1-line summary: - logger.info("using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], spins? 
[{}], types [{}]", - nodePaths.length, allMounts, totFSPath.getAvailable(), totFSPath.getTotal(), toString(allSpins), toString(allTypes)); + logger.info("using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], types [{}]", + nodePaths.length, allMounts, totFSPath.getAvailable(), totFSPath.getTotal(), toString(allTypes)); } } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 69f7af6eef5..c616716b86a 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -43,7 +43,7 @@ import java.util.concurrent.ConcurrentMap; public class GatewayAllocator extends AbstractComponent { - private RoutingService routingService; + private final RoutingService routingService; private final PrimaryShardAllocator primaryShardAllocator; private final ReplicaShardAllocator replicaShardAllocator; @@ -52,14 +52,12 @@ public class GatewayAllocator extends AbstractComponent { private final ConcurrentMap<ShardId, AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData>> asyncFetchStore = ConcurrentCollections.newConcurrentMap(); @Inject - public GatewayAllocator(Settings settings, final TransportNodesListGatewayStartedShards startedAction, final TransportNodesListShardStoreMetaData storeAction) { + public GatewayAllocator(Settings settings, ClusterService clusterService, RoutingService routingService, + TransportNodesListGatewayStartedShards startedAction, TransportNodesListShardStoreMetaData storeAction) { super(settings); + this.routingService = routingService; this.primaryShardAllocator = new InternalPrimaryShardAllocator(settings, startedAction); this.replicaShardAllocator = new InternalReplicaShardAllocator(settings, storeAction); - } - - public void setReallocation(final ClusterService clusterService, final RoutingService routingService) { - this.routingService = routingService; clusterService.addStateApplier(event -> { boolean cleanCache = false; DiscoveryNode localNode = event.state().nodes().getLocalNode(); @@ -79,6 +77,14 @@ public class GatewayAllocator extends AbstractComponent { }); } + // for tests + protected GatewayAllocator(Settings settings) { + super(settings); + this.routingService = null; + this.primaryShardAllocator = null; + this.replicaShardAllocator = null; + } + public int getNumberOfInFlightFetch() { int count = 0; for (AsyncShardFetch<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetch : asyncFetchStarted.values()) { diff --git a/core/src/main/java/org/elasticsearch/index/VersionType.java b/core/src/main/java/org/elasticsearch/index/VersionType.java index fcbd6690a38..c5094ea185d 100644 --- a/core/src/main/java/org/elasticsearch/index/VersionType.java +++ b/core/src/main/java/org/elasticsearch/index/VersionType.java @@ -364,13 +364,11 @@ public enum VersionType implements Writeable { } public static VersionType readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - assert (ordinal == 0 || ordinal == 1 || ordinal == 2 || ordinal == 3); - return VersionType.values()[ordinal]; + return in.readEnum(VersionType.class); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(ordinal()); + out.writeEnum(this); } } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/IndexIndexFieldData.java 
b/core/src/main/java/org/elasticsearch/index/fielddata/plain/ConstantIndexFieldData.java similarity index 83% rename from core/src/main/java/org/elasticsearch/index/fielddata/plain/IndexIndexFieldData.java rename to core/src/main/java/org/elasticsearch/index/fielddata/plain/ConstantIndexFieldData.java index 53832c0b5b1..ebf959e92e1 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/IndexIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/ConstantIndexFieldData.java @@ -44,26 +44,33 @@ import org.elasticsearch.search.MultiValueMode; import java.io.IOException; import java.util.Collection; import java.util.Collections; +import java.util.function.Function; -public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData { +public class ConstantIndexFieldData extends AbstractIndexOrdinalsFieldData { public static class Builder implements IndexFieldData.Builder { + private final Function<MapperService, String> valueFunction; + + public Builder(Function<MapperService, String> valueFunction) { + this.valueFunction = valueFunction; + } + @Override public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) { - return new IndexIndexFieldData(indexSettings, fieldType.name()); + return new ConstantIndexFieldData(indexSettings, fieldType.name(), valueFunction.apply(mapperService)); } } - private static class IndexAtomicFieldData extends AbstractAtomicOrdinalsFieldData { + private static class ConstantAtomicFieldData extends AbstractAtomicOrdinalsFieldData { - private final String index; + private final String value; - IndexAtomicFieldData(String index) { + ConstantAtomicFieldData(String value) { super(DEFAULT_SCRIPT_FUNCTION); - this.index = index; + this.value = value; } @Override @@ -78,7 +85,7 @@ public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData { @Override public SortedSetDocValues getOrdinalsValues() { - final BytesRef term = new BytesRef(index); + final BytesRef term = new BytesRef(value); final SortedDocValues sortedValues = new AbstractSortedDocValues() { private int docID = -1; @@ -120,12 +127,12 @@ public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData { private final AtomicOrdinalsFieldData atomicFieldData; - private IndexIndexFieldData(IndexSettings indexSettings, String name) { + private ConstantIndexFieldData(IndexSettings indexSettings, String name, String value) { super(indexSettings, name, null, null, TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY, TextFieldMapper.Defaults.FIELDDATA_MAX_FREQUENCY, TextFieldMapper.Defaults.FIELDDATA_MIN_SEGMENT_SIZE); - atomicFieldData = new IndexAtomicFieldData(index().getName()); + atomicFieldData = new ConstantAtomicFieldData(value); } @Override @@ -144,7 +151,8 @@ public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData { } @Override - public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, + boolean reverse) { final XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); return new SortField(getFieldName(), source, reverse); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 39280bcee20..a8d172a5112 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -24,17 +24,16 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.mapper.MetadataFieldMapper.TypeParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -241,8 +240,8 @@ public class DocumentMapper implements ToXContent { return metadataMapper(IndexFieldMapper.class); } - public Query typeFilter() { - return typeMapper().fieldType().termQuery(type, null); + public Query typeFilter(QueryShardContext context) { + return typeMapper().fieldType().termQuery(type, context); } public boolean hasNestedObjects() { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java index c961b74d77a..c85f5d7d9fa 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/IndexFieldMapper.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.plain.IndexIndexFieldData; +import org.elasticsearch.index.fielddata.plain.ConstantIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; @@ -157,7 +157,7 @@ public class IndexFieldMapper extends MetadataFieldMapper { @Override public IndexFieldData.Builder fielddataBuilder() { - return new IndexIndexFieldData.Builder(); + return new ConstantIndexFieldData.Builder(mapperService -> mapperService.index().getName()); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index cca285198ef..bd65d123f81 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -99,12 +99,10 @@ public class MapperService extends AbstractIndexComponent implements Closeable { public static final Setting<Boolean> INDEX_MAPPING_SINGLE_TYPE_SETTING; static { Function<Settings, String> defValue = settings -> { - // TODO: uncomment this - /*boolean singleType = true; + boolean singleType = true; if (settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) != null) { singleType = Version.indexCreated(settings).onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED); - }*/ - boolean singleType = false; + } return 
Boolean.valueOf(singleType).toString(); }; INDEX_MAPPING_SINGLE_TYPE_SETTING = Setting.boolSetting("index.mapping.single_type", defValue, Property.IndexScope, Property.Final); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index 9a271916ac1..257792b8b09 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -22,6 +22,8 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.DoubleRange; import org.apache.lucene.document.FloatRange; import org.apache.lucene.document.IntRange; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.InetAddressRange; import org.apache.lucene.document.LongRange; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; @@ -29,12 +31,12 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.FormatDateTimeFormatter; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.LocaleUtils; @@ -45,6 +47,7 @@ import org.elasticsearch.index.query.QueryShardContext; import org.joda.time.DateTimeZone; import java.io.IOException; +import java.net.InetAddress; import java.util.ArrayList; import java.util.Iterator; import java.util.List; @@ -341,8 +344,8 @@ public class RangeFieldMapper extends FieldMapper { RangeFieldType fieldType = fieldType(); RangeType rangeType = fieldType.rangeType; String fieldName = null; - Number from = rangeType.minValue(); - Number to = rangeType.maxValue(); + Object from = rangeType.minValue(); + Object to = rangeType.maxValue(); boolean includeFrom = DEFAULT_INCLUDE_LOWER; boolean includeTo = DEFAULT_INCLUDE_UPPER; XContentParser.Token token; @@ -427,10 +430,72 @@ public class RangeFieldMapper extends FieldMapper { /** Enum defining the type of range */ public enum RangeType { + IP("ip_range") { + @Override + public Field getRangeField(String name, Range r) { + return new InetAddressRange(name, (InetAddress)r.from, (InetAddress)r.to); + } + @Override + public InetAddress parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) + throws IOException { + InetAddress address = InetAddresses.forString(parser.text()); + return included ? address : nextUp(address); + } + @Override + public InetAddress parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) + throws IOException { + InetAddress address = InetAddresses.forString(parser.text()); + return included ? address : nextDown(address); + } + @Override + public InetAddress parse(Object value, boolean coerce) { + return value instanceof InetAddress ? 
(InetAddress) value : InetAddresses.forString((String) value); + } + @Override + public InetAddress minValue() { + return InetAddressPoint.MIN_VALUE; + } + @Override + public InetAddress maxValue() { + return InetAddressPoint.MAX_VALUE; + } + @Override + public InetAddress nextUp(Object value) { + return InetAddressPoint.nextUp((InetAddress)value); + } + @Override + public InetAddress nextDown(Object value) { + return InetAddressPoint.nextDown((InetAddress)value); + } + @Override + public Query withinQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { + InetAddress lower = (InetAddress)from; + InetAddress upper = (InetAddress)to; + return InetAddressRange.newWithinQuery(field, + includeLower ? lower : nextUp(lower), includeUpper ? upper : nextDown(upper)); + } + @Override + public Query containsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { + InetAddress lower = (InetAddress)from; + InetAddress upper = (InetAddress)to; + return InetAddressRange.newContainsQuery(field, + includeLower ? lower : nextUp(lower), includeUpper ? upper : nextDown(upper)); + } + @Override + public Query intersectsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { + InetAddress lower = (InetAddress)from; + InetAddress upper = (InetAddress)to; + return InetAddressRange.newIntersectsQuery(field, + includeLower ? lower : nextUp(lower), includeUpper ? upper : nextDown(upper)); + } + public String toString(InetAddress address) { + return InetAddresses.toAddrString(address); + } + }, DATE("date_range", NumberType.LONG) { @Override public Field getRangeField(String name, Range r) { - return new LongRange(name, new long[] {r.from.longValue()}, new long[] {r.to.longValue()}); + return new LongRange(name, new long[] {((Number)r.from).longValue()}, new long[] {((Number)r.to).longValue()}); } private Number parse(DateMathParser dateMathParser, String dateStr) { return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");}); @@ -456,16 +521,12 @@ public class RangeFieldMapper extends FieldMapper { return Long.MAX_VALUE; } @Override - public Number nextUp(Number value) { - return LONG.nextUp(value); + public Long nextUp(Object value) { + return (long) LONG.nextUp(value); } @Override - public Number nextDown(Number value) { - return LONG.nextDown(value); - } - @Override - public byte[] getBytes(Range r) { - return LONG.getBytes(r); + public Long nextDown(Object value) { + return (long) LONG.nextDown(value); } @Override public Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @@ -484,15 +545,15 @@ public class RangeFieldMapper extends FieldMapper { return super.rangeQuery(field, low, high, includeLower, includeUpper, relation, zone, dateMathParser, context); } @Override - public Query withinQuery(String field, Number from, Number to, boolean includeLower, boolean includeUpper) { + public Query withinQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { return LONG.withinQuery(field, from, to, includeLower, includeUpper); } @Override - public Query containsQuery(String field, Number from, Number to, boolean includeLower, boolean includeUpper) { + public Query containsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { return LONG.containsQuery(field, from, to, includeLower, includeUpper); } @Override - public Query 
intersectsQuery(String field, Number from, Number to, boolean includeLower, boolean includeUpper) { + public Query intersectsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) { return LONG.intersectsQuery(field, from, to, includeLower, includeUpper); } }, @@ -507,38 +568,31 @@ public class RangeFieldMapper extends FieldMapper { return Float.POSITIVE_INFINITY; } @Override - public Float nextUp(Number value) { - return Math.nextUp(value.floatValue()); + public Float nextUp(Object value) { + return Math.nextUp(((Number)value).floatValue()); } @Override - public Float nextDown(Number value) { - return Math.nextDown(value.floatValue()); + public Float nextDown(Object value) { + return Math.nextDown(((Number)value).floatValue()); } @Override public Field getRangeField(String name, Range r) { - return new FloatRange(name, new float[] {r.from.floatValue()}, new float[] {r.to.floatValue()}); + return new FloatRange(name, new float[] {((Number)r.from).floatValue()}, new float[] {((Number)r.to).floatValue()}); } @Override - public byte[] getBytes(Range r) { - byte[] b = new byte[Float.BYTES*2]; - NumericUtils.intToSortableBytes(NumericUtils.floatToSortableInt(r.from.floatValue()), b, 0); - NumericUtils.intToSortableBytes(NumericUtils.floatToSortableInt(r.to.floatValue()), b, Float.BYTES); - return b; - } - @Override - public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return FloatRange.newWithinQuery(field, new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)}, new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)}); } @Override - public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return FloatRange.newContainsQuery(field, new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)}, new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)}); } @Override - public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return FloatRange.newIntersectsQuery(field, new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)}, new float[] {includeTo ? 
(Float)to : Math.nextDown((Float)to)}); @@ -554,38 +608,31 @@ public class RangeFieldMapper extends FieldMapper { return Double.POSITIVE_INFINITY; } @Override - public Double nextUp(Number value) { - return Math.nextUp(value.doubleValue()); + public Double nextUp(Object value) { + return Math.nextUp(((Number)value).doubleValue()); } @Override - public Double nextDown(Number value) { - return Math.nextDown(value.doubleValue()); + public Double nextDown(Object value) { + return Math.nextDown(((Number)value).doubleValue()); } @Override public Field getRangeField(String name, Range r) { - return new DoubleRange(name, new double[] {r.from.doubleValue()}, new double[] {r.to.doubleValue()}); + return new DoubleRange(name, new double[] {((Number)r.from).doubleValue()}, new double[] {((Number)r.to).doubleValue()}); } @Override - public byte[] getBytes(Range r) { - byte[] b = new byte[Double.BYTES*2]; - NumericUtils.longToSortableBytes(NumericUtils.doubleToSortableLong(r.from.doubleValue()), b, 0); - NumericUtils.longToSortableBytes(NumericUtils.doubleToSortableLong(r.to.doubleValue()), b, Double.BYTES); - return b; - } - @Override - public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return DoubleRange.newWithinQuery(field, new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)}, new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)}); } @Override - public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return DoubleRange.newContainsQuery(field, new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)}, new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)}); } @Override - public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return DoubleRange.newIntersectsQuery(field, new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)}, new double[] {includeTo ? 
(Double)to : Math.nextDown((Double)to)}); @@ -603,36 +650,29 @@ public class RangeFieldMapper extends FieldMapper { return Integer.MAX_VALUE; } @Override - public Integer nextUp(Number value) { - return value.intValue() + 1; + public Integer nextUp(Object value) { + return ((Number)value).intValue() + 1; } @Override - public Integer nextDown(Number value) { - return value.intValue() - 1; + public Integer nextDown(Object value) { + return ((Number)value).intValue() - 1; } @Override public Field getRangeField(String name, Range r) { - return new IntRange(name, new int[] {r.from.intValue()}, new int[] {r.to.intValue()}); + return new IntRange(name, new int[] {((Number)r.from).intValue()}, new int[] {((Number)r.to).intValue()}); } @Override - public byte[] getBytes(Range r) { - byte[] b = new byte[Integer.BYTES*2]; - NumericUtils.intToSortableBytes(r.from.intValue(), b, 0); - NumericUtils.intToSortableBytes(r.to.intValue(), b, Integer.BYTES); - return b; - } - @Override - public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return IntRange.newWithinQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)}, new int[] {(Integer)to - (includeTo ? 0 : 1)}); } @Override - public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return IntRange.newContainsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)}, new int[] {(Integer)to - (includeTo ? 0 : 1)}); } @Override - public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return IntRange.newIntersectsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)}, new int[] {(Integer)to - (includeTo ? 0 : 1)}); } @@ -647,43 +687,40 @@ public class RangeFieldMapper extends FieldMapper { return Long.MAX_VALUE; } @Override - public Long nextUp(Number value) { - return value.longValue() + 1; + public Long nextUp(Object value) { + return ((Number)value).longValue() + 1; } @Override - public Long nextDown(Number value) { - return value.longValue() - 1; + public Long nextDown(Object value) { + return ((Number)value).longValue() - 1; } @Override public Field getRangeField(String name, Range r) { - return new LongRange(name, new long[] {r.from.longValue()}, new long[] {r.to.longValue()}); + return new LongRange(name, new long[] {((Number)r.from).longValue()}, + new long[] {((Number)r.to).longValue()}); } @Override - public byte[] getBytes(Range r) { - byte[] b = new byte[Long.BYTES*2]; - long from = r.from == null ? Long.MIN_VALUE : r.from.longValue(); - long to = r.to == null ? Long.MAX_VALUE : r.to.longValue(); - NumericUtils.longToSortableBytes(from, b, 0); - NumericUtils.longToSortableBytes(to, b, Long.BYTES); - return b; - } - @Override - public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return LongRange.newWithinQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)}, new long[] {(Long)to - (includeTo ? 
0 : 1)}); } @Override - public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return LongRange.newContainsQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)}, new long[] {(Long)to - (includeTo ? 0 : 1)}); } @Override - public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) { return LongRange.newIntersectsQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)}, new long[] {(Long)to - (includeTo ? 0 : 1)}); } }; + RangeType(String name) { + this.name = name; + this.numberType = null; + } + RangeType(String name, NumberType type) { this.name = name; this.numberType = type; @@ -694,7 +731,6 @@ public class RangeFieldMapper extends FieldMapper { return name; } - protected abstract byte[] getBytes(Range range); public abstract Field getRangeField(String name, Range range); public List<IndexableField> createFields(String name, Range range, boolean indexed, boolean docValued, boolean stored) { assert range != null : "range cannot be null when creating fields"; @@ -709,29 +745,31 @@ public class RangeFieldMapper extends FieldMapper { return fields; } /** parses from value. rounds according to included flag */ - public Number parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException { + public Object parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException { Number value = numberType.parse(parser, coerce); - return included ? value : nextUp(value); + return included ? value : (Number)nextUp(value); } /** parses to value. rounds according to included flag */ - public Number parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException { + public Object parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException { Number value = numberType.parse(parser, coerce); - return included ? value : nextDown(value); + return included ? 
value : (Number)nextDown(value); } - public abstract Number minValue(); - public abstract Number maxValue(); - public abstract Number nextUp(Number value); - public abstract Number nextDown(Number value); - public abstract Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo); - public abstract Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo); - public abstract Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo); - + public abstract Object minValue(); + public abstract Object maxValue(); + public abstract Object nextUp(Object value); + public abstract Object nextDown(Object value); + public abstract Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo); + public abstract Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo); + public abstract Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo); + public Object parse(Object value, boolean coerce) { + return numberType.parse(value, coerce); + } public Query rangeQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo, ShapeRelation relation, @Nullable DateTimeZone timeZone, @Nullable DateMathParser dateMathParser, QueryShardContext context) { - Number lower = from == null ? minValue() : numberType.parse(from, false); - Number upper = to == null ? maxValue() : numberType.parse(to, false); + Object lower = from == null ? minValue() : parse(from, false); + Object upper = to == null ? maxValue() : parse(to, false); if (relation == ShapeRelation.WITHIN) { return withinQuery(field, lower, upper, includeFrom, includeTo); } else if (relation == ShapeRelation.CONTAINS) { @@ -747,12 +785,12 @@ public class RangeFieldMapper extends FieldMapper { /** Class defining a range */ public static class Range { RangeType type; - private Number from; - private Number to; + private Object from; + private Object to; private boolean includeFrom; private boolean includeTo; - public Range(RangeType type, Number from, Number to, boolean includeFrom, boolean includeTo) { + public Range(RangeType type, Object from, Object to, boolean includeFrom, boolean includeTo) { this.type = type; this.from = from; this.to = to; @@ -764,9 +802,11 @@ public class RangeFieldMapper extends FieldMapper { public String toString() { StringBuilder sb = new StringBuilder(); sb.append(includeFrom ? '[' : '('); - sb.append(includeFrom || from.equals(type.minValue()) ? from : type.nextDown(from)); - sb.append(':'); - sb.append(includeTo || to.equals(type.maxValue()) ? to : type.nextUp(to)); + Object f = includeFrom || from.equals(type.minValue()) ? from : type.nextDown(from); + Object t = includeTo || to.equals(type.maxValue()) ? to : type.nextUp(to); + sb.append(type == RangeType.IP ? InetAddresses.toAddrString((InetAddress)f) : f.toString()); + sb.append(" : "); + sb.append(type == RangeType.IP ? InetAddresses.toAddrString((InetAddress)t) : t.toString()); sb.append(includeTo ? 
']' : ')'); return sb.toString(); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index c24747e62c8..9d4a4a6987b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -30,26 +30,29 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.Nullable; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; -import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData; +import org.elasticsearch.index.fielddata.plain.ConstantIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; +import java.util.function.Function; public class TypeFieldMapper extends MetadataFieldMapper { @@ -88,29 +91,12 @@ public class TypeFieldMapper extends MetadataFieldMapper { } static final class TypeFieldType extends StringFieldType { - private boolean fielddata; TypeFieldType() { - this.fielddata = false; } protected TypeFieldType(TypeFieldType ref) { super(ref); - this.fielddata = ref.fielddata; - } - - @Override - public boolean equals(Object o) { - if (super.equals(o) == false) { - return false; - } - TypeFieldType that = (TypeFieldType) o; - return fielddata == that.fielddata; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), fielddata); } @Override @@ -123,49 +109,76 @@ public class TypeFieldMapper extends MetadataFieldMapper { return CONTENT_TYPE; } - public boolean fielddata() { - return fielddata; - } - - public void setFielddata(boolean fielddata) { - checkIfFrozen(); - this.fielddata = fielddata; - } - @Override public IndexFieldData.Builder fielddataBuilder() { if (hasDocValues()) { return new DocValuesIndexFieldData.Builder(); + } else { + // means the index has a single type and the type field is implicit + Function<MapperService, String> typeFunction = mapperService -> { + Collection<String> types = mapperService.types(); + if (types.size() > 1) { + throw new AssertionError(); + } + // If we reach here, there is necessarily one type since we were able to find a `_type` field + String type = types.iterator().next(); + return type; + }; + return new ConstantIndexFieldData.Builder(typeFunction); } - assert indexOptions() != IndexOptions.NONE; - if (fielddata) { - return new PagedBytesIndexFieldData.Builder(TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY, - TextFieldMapper.Defaults.FIELDDATA_MAX_FREQUENCY, - TextFieldMapper.Defaults.FIELDDATA_MIN_SEGMENT_SIZE); - } - return super.fielddataBuilder(); } 
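
The hunk above swaps the `_type` field's paged-bytes fielddata for the new `ConstantIndexFieldData.Builder`, the same builder the `_index` field now uses; the per-index constant is supplied by a `Function<MapperService, String>` (the index name for `_index`, the single mapped type for `_type`). Below is a minimal, self-contained sketch of that parameterization; `FakeMapperService`, `ConstantFieldDataBuilder`, and `ConstantFieldDataDemo` are hypothetical stand-ins used only to illustrate the pattern, not the Elasticsearch classes.

-------------------------------------------------
import java.util.function.Function;

// Hypothetical stand-in for MapperService, exposing the two values the real
// builders derive: the index name and the single mapped type.
final class FakeMapperService {
    private final String indexName;
    private final String singleType;
    FakeMapperService(String indexName, String singleType) {
        this.indexName = indexName;
        this.singleType = singleType;
    }
    String indexName() { return indexName; }
    String singleType() { return singleType; }
}

// Hypothetical stand-in for ConstantIndexFieldData.Builder: the constant value
// is not hard-coded but produced by a function of the mapper service.
final class ConstantFieldDataBuilder {
    private final Function<FakeMapperService, String> valueFunction;
    ConstantFieldDataBuilder(Function<FakeMapperService, String> valueFunction) {
        this.valueFunction = valueFunction;
    }
    String build(FakeMapperService mapperService) {
        return valueFunction.apply(mapperService);
    }
}

public final class ConstantFieldDataDemo {
    public static void main(String[] args) {
        FakeMapperService mapperService = new FakeMapperService("logs-2017", "doc");
        // _index: the constant value is the index name.
        ConstantFieldDataBuilder indexBuilder = new ConstantFieldDataBuilder(m -> m.indexName());
        // _type on a single-type index: the constant value is the only mapped type.
        ConstantFieldDataBuilder typeBuilder = new ConstantFieldDataBuilder(m -> m.singleType());
        System.out.println(indexBuilder.build(mapperService)); // logs-2017
        System.out.println(typeBuilder.build(mapperService));  // doc
    }
}
-------------------------------------------------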
@Override - public Query termQuery(Object value, @Nullable QueryShardContext context) { - if (indexOptions() == IndexOptions.NONE) { - throw new AssertionError(); + public FieldStats<?> stats(IndexReader reader) throws IOException { + if (reader.maxDoc() == 0) { + return null; } - return new TypesQuery(indexedValueForSearch(value)); + return new FieldStats.Text(reader.maxDoc(), reader.numDocs(), reader.maxDoc(), reader.maxDoc(), + isSearchable(), isAggregatable()); } @Override - public void checkCompatibility(MappedFieldType other, - List<String> conflicts, boolean strict) { - super.checkCompatibility(other, conflicts, strict); - TypeFieldType otherType = (TypeFieldType) other; - if (strict) { - if (fielddata() != otherType.fielddata()) { - conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] " - + "across all types."); + public boolean isSearchable() { + return true; + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + return termsQuery(Arrays.asList(value), context); + } + + @Override + public Query termsQuery(List<?> values, QueryShardContext context) { + if (context.getIndexSettings().getValue(MapperService.INDEX_MAPPING_SINGLE_TYPE_SETTING)) { + Collection<String> indexTypes = context.getMapperService().types(); + if (indexTypes.isEmpty()) { + return new MatchNoDocsQuery("No types"); } + assert indexTypes.size() == 1; + BytesRef indexType = indexedValueForSearch(indexTypes.iterator().next()); + if (values.stream() + .map(this::indexedValueForSearch) + .anyMatch(indexType::equals)) { + if (context.getMapperService().hasNested()) { + // type filters are expected not to match nested docs + return Queries.newNonNestedFilter(); + } else { + return new MatchAllDocsQuery(); + } + } else { + return new MatchNoDocsQuery("Type list does not contain the index type"); + } + } else { + if (indexOptions() == IndexOptions.NONE) { + throw new AssertionError(); + } + final BytesRef[] types = values.stream() + .map(this::indexedValueForSearch) + .toArray(size -> new BytesRef[size]); + return new TypesQuery(types); } } + } /** @@ -261,7 +274,13 @@ public class TypeFieldMapper extends MetadataFieldMapper { private static MappedFieldType defaultFieldType(Settings indexSettings) { MappedFieldType defaultFieldType = Defaults.FIELD_TYPE.clone(); - defaultFieldType.setHasDocValues(true); + if (MapperService.INDEX_MAPPING_SINGLE_TYPE_SETTING.get(indexSettings)) { + defaultFieldType.setIndexOptions(IndexOptions.NONE); + defaultFieldType.setHasDocValues(false); + } else { + defaultFieldType.setIndexOptions(IndexOptions.DOCS); + defaultFieldType.setHasDocValues(true); + } return defaultFieldType; } diff --git a/core/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java index 37d06f79eb2..18ad7f9f310 100644 --- a/core/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/HasChildQueryBuilder.java @@ -338,10 +338,10 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil } // wrap the query with type query - innerQuery = Queries.filtered(innerQuery, childDocMapper.typeFilter()); + innerQuery = Queries.filtered(innerQuery, childDocMapper.typeFilter(context)); final ParentChildIndexFieldData parentChildIndexFieldData = context.getForField(parentFieldMapper.fieldType()); - return new LateParsingQuery(parentDocMapper.typeFilter(), 
innerQuery, minChildren(), maxChildren(), + return new LateParsingQuery(parentDocMapper.typeFilter(context), innerQuery, minChildren(), maxChildren(), parentType, scoreMode, parentChildIndexFieldData, context.getSearchSimilarity()); } diff --git a/core/src/main/java/org/elasticsearch/index/query/HasParentQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/HasParentQueryBuilder.java index 0dd0aea15c3..63c94846917 100644 --- a/core/src/main/java/org/elasticsearch/index/query/HasParentQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/HasParentQueryBuilder.java @@ -185,18 +185,18 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu Query childrenQuery; if (childTypes.size() == 1) { DocumentMapper documentMapper = context.getMapperService().documentMapper(childTypes.iterator().next()); - childrenQuery = documentMapper.typeFilter(); + childrenQuery = documentMapper.typeFilter(context); } else { BooleanQuery.Builder childrenFilter = new BooleanQuery.Builder(); for (String childrenTypeStr : childTypes) { DocumentMapper documentMapper = context.getMapperService().documentMapper(childrenTypeStr); - childrenFilter.add(documentMapper.typeFilter(), BooleanClause.Occur.SHOULD); + childrenFilter.add(documentMapper.typeFilter(context), BooleanClause.Occur.SHOULD); } childrenQuery = childrenFilter.build(); } // wrap the query with type query - innerQuery = Queries.filtered(innerQuery, parentDocMapper.typeFilter()); + innerQuery = Queries.filtered(innerQuery, parentDocMapper.typeFilter(context)); return new HasChildQueryBuilder.LateParsingQuery(childrenQuery, innerQuery, HasChildQueryBuilder.DEFAULT_MIN_CHILDREN, diff --git a/core/src/main/java/org/elasticsearch/index/query/Operator.java b/core/src/main/java/org/elasticsearch/index/query/Operator.java index 7972dbb49ad..de88abebad3 100644 --- a/core/src/main/java/org/elasticsearch/index/query/Operator.java +++ b/core/src/main/java/org/elasticsearch/index/query/Operator.java @@ -54,16 +54,12 @@ public enum Operator implements Writeable { } public static Operator readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown Operator ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(Operator.class); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.ordinal()); + out.writeEnum(this); } public static Operator fromString(String op) { diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index 03642115976..24e42b41b20 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -69,14 +69,14 @@ public class QueryRewriteContext { * Returns the index settings for this context. This might return null if the * context has not index scope. */ - public final IndexSettings getIndexSettings() { + public IndexSettings getIndexSettings() { return indexSettings; } /** * Return the MapperService. 
*/ - public final MapperService getMapperService() { + public MapperService getMapperService() { return mapperService; } diff --git a/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java index 88e655555f8..fc4df8efa81 100644 --- a/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java @@ -132,7 +132,7 @@ public class TypeQueryBuilder extends AbstractQueryBuilder<TypeQueryBuilder> { // no type means no documents return new MatchNoDocsQuery(); } else { - return documentMapper.typeFilter(); + return documentMapper.typeFilter(context); } } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index f1cef1fb663..5e004a4759c 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1523,20 +1523,20 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl verifyReplicationTarget(); final SequenceNumbersService seqNoService = getEngine().seqNoService(); final long localCheckpoint = seqNoService.getLocalCheckpoint(); - if (globalCheckpoint <= localCheckpoint) { - seqNoService.updateGlobalCheckpointOnReplica(globalCheckpoint); - } else { + if (globalCheckpoint > localCheckpoint) { /* * This can happen during recovery when the shard has started its engine but recovery is not finalized and is receiving global - * checkpoint updates from in-flight operations. However, since this shard is not yet contributing to calculating the global - * checkpoint, it can be the case that the global checkpoint update from the primary is ahead of the local checkpoint on this - * shard. In this case, we ignore the global checkpoint update. This should only happen if we are in the translog stage of - * recovery. Prior to this, the engine is not opened and this shard will not receive global checkpoint updates, and after this - * the shard will be contributing to calculations of the the global checkpoint. + * checkpoint updates. However, since this shard is not yet contributing to calculating the global checkpoint, it can be the + * case that the global checkpoint update from the primary is ahead of the local checkpoint on this shard. In this case, we + * ignore the global checkpoint update. This can happen if we are in the translog stage of recovery. Prior to this, the engine + * is not opened and this shard will not receive global checkpoint updates, and after this the shard will be contributing to + * calculations of the the global checkpoint. However, we can not assert that we are in the translog stage of recovery here as + * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move + * to recovery finalization, or even finished recovery before the update arrives here. 
*/ - assert recoveryState().getStage() == RecoveryState.Stage.TRANSLOG - : "expected recovery stage [" + RecoveryState.Stage.TRANSLOG + "] but was [" + recoveryState().getStage() + "]"; + return; } + seqNoService.updateGlobalCheckpointOnReplica(globalCheckpoint); } /** diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java index e20eb42427f..6f5e9a52b4f 100644 --- a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java @@ -49,10 +49,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent { long free = -1; long available = -1; - /** Uses Lucene's {@code IOUtils.spins} method to try to determine if the device backed by spinning media. - * This is null if we could not determine it, true if it possibly spins, else false. */ - Boolean spins = null; - public Path() { } @@ -74,7 +70,9 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent { total = in.readLong(); free = in.readLong(); available = in.readLong(); - spins = in.readOptionalBoolean(); + if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { + in.readOptionalBoolean(); + } } @Override @@ -85,7 +83,9 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent { out.writeLong(total); out.writeLong(free); out.writeLong(available); - out.writeOptionalBoolean(spins); + if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { + out.writeOptionalBoolean(null); + } } public String getPath() { @@ -112,10 +112,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent { return new ByteSizeValue(available); } - public Boolean getSpins() { - return spins; - } - private long addLong(long current, long other) { if (other == -1) { return current; @@ -140,10 +136,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent { total = FsProbe.adjustForHugeFilesystems(addLong(total, path.total)); free = FsProbe.adjustForHugeFilesystems(addLong(free, path.free)); available = FsProbe.adjustForHugeFilesystems(addLong(available, path.available)); - if (path.spins != null && path.spins.booleanValue()) { - // Spinning is contagious! 
- spins = Boolean.TRUE; - } } static final class Fields { @@ -156,7 +148,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent { static final String FREE_IN_BYTES = "free_in_bytes"; static final String AVAILABLE = "available"; static final String AVAILABLE_IN_BYTES = "available_in_bytes"; - static final String SPINS = "spins"; } @Override @@ -181,9 +172,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent { if (available != -1) { builder.byteSizeField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, available); } - if (spins != null) { - builder.field(Fields.SPINS, spins.toString()); - } builder.endObject(); return builder; diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java index 1fdae49a6f1..8e3cd53e74f 100644 --- a/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java +++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java @@ -159,7 +159,6 @@ public class FsProbe extends AbstractComponent { fsPath.available = nodePath.fileStore.getUsableSpace(); fsPath.type = nodePath.fileStore.type(); fsPath.mount = nodePath.fileStore.toString(); - fsPath.spins = nodePath.spins; return fsPath; } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 9c933b1da70..28d2f42f592 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -49,7 +49,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingService; -import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.SuppressForbidden; @@ -352,7 +351,7 @@ public class Node implements Closeable { final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool, clusterInfoService); modules.add(new NodeModule(this, monitorService)); ClusterModule clusterModule = new ClusterModule(settings, clusterService, - pluginsService.filterPlugins(ClusterPlugin.class)); + pluginsService.filterPlugins(ClusterPlugin.class), clusterInfoService); modules.add(clusterModule); IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)); modules.add(indicesModule); @@ -437,7 +436,8 @@ public class Node implements Closeable { final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry, networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(), - clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class)); + clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class), + clusterModule.getAllocationService()); NodeService nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(), transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(), httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter()); @@ -488,6 +488,9 @@ public class Node implements Closeable { ); injector = modules.createInjector(); + // TODO hack around circular dependencies 
problems in AllocationService + clusterModule.getAllocationService().setGatewayAllocator(injector.getInstance(GatewayAllocator.class)); + List<LifecycleComponent> pluginLifecycleComponents = pluginComponents.stream() .filter(p -> p instanceof LifecycleComponent) .map(p -> (LifecycleComponent) p).collect(Collectors.toList()); @@ -644,8 +647,6 @@ public class Node implements Closeable { Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings)); logger.info("starting ..."); - // hack around dependency injection problem (for now...) - injector.getInstance(Discovery.class).setAllocationService(injector.getInstance(AllocationService.class)); pluginLifecycleComponents.forEach(LifecycleComponent::start); injector.getInstance(MappingUpdatedAction.class).setClient(client); @@ -663,9 +664,6 @@ public class Node implements Closeable { nodeConnectionsService.start(); clusterService.setNodeConnectionsService(nodeConnectionsService); - // TODO hack around circular dependencies problems - injector.getInstance(GatewayAllocator.class).setReallocation(clusterService, injector.getInstance(RoutingService.class)); - injector.getInstance(ResourceWatcherService.class).start(); injector.getInstance(GatewayService.class).start(); Discovery discovery = injector.getInstance(Discovery.class); diff --git a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java index 0569547b822..c3af5593cd7 100644 --- a/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java +++ b/core/src/main/java/org/elasticsearch/plugins/DiscoveryPlugin.java @@ -23,6 +23,7 @@ import java.util.Collections; import java.util.Map; import java.util.function.Supplier; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -68,7 +69,8 @@ public interface DiscoveryPlugin { MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, - UnicastHostsProvider hostsProvider) { + UnicastHostsProvider hostsProvider, + AllocationService allocationService) { return Collections.emptyMap(); } diff --git a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 6619c1ab9e5..45b89675454 100644 --- a/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -76,6 +76,7 @@ import org.elasticsearch.search.suggest.SuggestionSearchContext; import java.io.IOException; import java.io.UncheckedIOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -290,13 +291,13 @@ final class DefaultSearchContext extends SearchContext { } } - private static Query createTypeFilter(String[] types) { + private Query createTypeFilter(String[] types) { if (types != null && types.length >= 1) { - BytesRef[] typesBytes = new BytesRef[types.length]; - for (int i = 0; i < typesBytes.length; i++) { - typesBytes[i] = new BytesRef(types[i]); + MappedFieldType ft = mapperService().fullName(TypeFieldMapper.NAME); + if (ft != null) { + // ft might be null if no documents have been indexed yet + return ft.termsQuery(Arrays.asList(types), queryShardContext); } - return new 
TypeFieldMapper.TypesQuery(typesBytes); } return null; } diff --git a/core/src/main/java/org/elasticsearch/search/MultiValueMode.java b/core/src/main/java/org/elasticsearch/search/MultiValueMode.java index 2d6fd8a2b60..231bc8bf3c0 100644 --- a/core/src/main/java/org/elasticsearch/search/MultiValueMode.java +++ b/core/src/main/java/org/elasticsearch/search/MultiValueMode.java @@ -948,14 +948,10 @@ public enum MultiValueMode implements Writeable { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.ordinal()); + out.writeEnum(this); } public static MultiValueMode readMultiValueModeFrom(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown MultiValueMode ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(MultiValueMode.class); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java index 44adb33c01b..41a0fa6dd30 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/Aggregator.java @@ -139,16 +139,12 @@ public abstract class Aggregator extends BucketCollector implements Releasable { } public static SubAggCollectionMode readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown SubAggCollectionMode ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(SubAggCollectionMode.class); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(ordinal()); + out.writeEnum(this); } } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java index ddd252a6f53..3a0d2fff982 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenAggregationBuilder.java @@ -98,8 +98,8 @@ public class ChildrenAggregationBuilder extends ValuesSourceAggregationBuilder<P parentType = parentFieldMapper.type(); DocumentMapper parentDocMapper = context.mapperService().documentMapper(parentType); if (parentDocMapper != null) { - parentFilter = parentDocMapper.typeFilter(); - childFilter = childDocMapper.typeFilter(); + parentFilter = parentDocMapper.typeFilter(context.getQueryShardContext()); + childFilter = childDocMapper.typeFilter(context.getQueryShardContext()); ParentChildIndexFieldData parentChildIndexFieldData = context.fieldData() .getForField(parentFieldMapper.fieldType()); config.fieldContext(new FieldContext(parentFieldMapper.fieldType().name(), parentChildIndexFieldData, diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index ae18cb59d95..e4c7906f215 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -76,7 +76,7 @@ public class DoubleTerms extends InternalMappedTerms<DoubleTerms, DoubleTerms.Bu } 
@Override - int compareTerm(Terms.Bucket other) { + public int compareTerm(Terms.Bucket other) { return Double.compare(term, ((Number) other.getKey()).doubleValue()); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 5ef96d19257..f315d915f0d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -218,7 +218,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr } @Override - int compareTerm(Terms.Bucket other) { + public int compareTerm(Terms.Bucket other) { return Long.compare(globalOrd, ((OrdBucket) other).globalOrd); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java index 21c9c461d0f..57c80c5fb40 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -101,7 +102,7 @@ public abstract class InternalMappedTerms<A extends InternalTerms<A, B>, B exten } @Override - public List<B> getBucketsInternal() { + public List<B> getBuckets() { return buckets; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index 0fb4ceea33a..3834f9a65be 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -40,15 +40,13 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import static java.util.Collections.unmodifiableList; - public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends InternalTerms.Bucket<B>> extends InternalMultiBucketAggregation<A, B> implements Terms, ToXContent { protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound"); protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count"); - public abstract static class Bucket<B extends Bucket<B>> extends Terms.Bucket { + public abstract static class Bucket<B extends Bucket<B>> extends InternalMultiBucketAggregation.InternalBucket implements Terms.Bucket { /** * Reads a bucket. Should be a constructor reference. 
@@ -212,11 +210,7 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int protected abstract void writeTermTypeInfoTo(StreamOutput out) throws IOException; @Override - public final List<Terms.Bucket> getBuckets() { - return unmodifiableList(getBucketsInternal()); - } - - protected abstract List<B> getBucketsInternal(); + public abstract List<B> getBuckets(); @Override public abstract B getBucketByKey(String term); @@ -244,7 +238,7 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int } otherDocCount += terms.getSumOfOtherDocCounts(); final long thisAggDocCountError; - if (terms.getBucketsInternal().size() < getShardSize() || InternalOrder.isTermOrder(order)) { + if (terms.getBuckets().size() < getShardSize() || InternalOrder.isTermOrder(order)) { thisAggDocCountError = 0; } else if (InternalOrder.isCountDesc(this.order)) { if (terms.getDocCountError() > 0) { @@ -254,7 +248,7 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int } else { // otherwise use the doc count of the last term in the // aggregation - thisAggDocCountError = terms.getBucketsInternal().get(terms.getBucketsInternal().size() - 1).docCount; + thisAggDocCountError = terms.getBuckets().get(terms.getBuckets().size() - 1).docCount; } } else { thisAggDocCountError = -1; @@ -267,7 +261,7 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int } } setDocCountError(thisAggDocCountError); - for (B bucket : terms.getBucketsInternal()) { + for (B bucket : terms.getBuckets()) { // If there is already a doc count error for this bucket // subtract this aggs doc count error from it to make the // new value for the bucket. This then means that when the diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index 98aa4825ee7..0de13a4d98f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -76,7 +76,7 @@ public class LongTerms extends InternalMappedTerms<LongTerms, LongTerms.Bucket> } @Override - int compareTerm(Terms.Bucket other) { + public int compareTerm(Terms.Bucket other) { return Long.compare(term, ((Number) other.getKey()).longValue()); } @@ -161,7 +161,7 @@ public class LongTerms extends InternalMappedTerms<LongTerms, LongTerms.Bucket> * Converts a {@link LongTerms} into a {@link DoubleTerms}, returning the value of the specified long terms as doubles. 
*/ static DoubleTerms convertLongTermsToDouble(LongTerms longTerms, DocValueFormat decimalFormat) { - List<Terms.Bucket> buckets = longTerms.getBuckets(); + List<LongTerms.Bucket> buckets = longTerms.getBuckets(); List<DoubleTerms.Bucket> newBuckets = new ArrayList<>(); for (Terms.Bucket bucket : buckets) { newBuckets.add(new DoubleTerms.Bucket(bucket.getKeyAsNumber().doubleValue(), diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index b48c443fac9..049d996c08c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -75,7 +75,7 @@ public class StringTerms extends InternalMappedTerms<StringTerms, StringTerms.Bu } @Override - int compareTerm(Terms.Bucket other) { + public int compareTerm(Terms.Bucket other) { return termBytes.compareTo(((Bucket) other).termBytes); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java index e4ff4cd3944..166ece4e112 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/Terms.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import java.util.Arrays; @@ -33,50 +32,23 @@ import java.util.List; */ public interface Terms extends MultiBucketsAggregation { - enum ValueType { - - STRING(org.elasticsearch.search.aggregations.support.ValueType.STRING), - LONG(org.elasticsearch.search.aggregations.support.ValueType.LONG), - DOUBLE(org.elasticsearch.search.aggregations.support.ValueType.DOUBLE); - - final org.elasticsearch.search.aggregations.support.ValueType scriptValueType; - - ValueType(org.elasticsearch.search.aggregations.support.ValueType scriptValueType) { - this.scriptValueType = scriptValueType; - } - - static ValueType resolveType(String type) { - if ("string".equals(type)) { - return STRING; - } - if ("double".equals(type) || "float".equals(type)) { - return DOUBLE; - } - if ("long".equals(type) || "integer".equals(type) || "short".equals(type) || "byte".equals(type)) { - return LONG; - } - return null; - } - } - /** * A bucket that is associated with a single term */ - abstract class Bucket extends InternalMultiBucketAggregation.InternalBucket { + interface Bucket extends MultiBucketsAggregation.Bucket { - public abstract Number getKeyAsNumber(); + Number getKeyAsNumber(); - abstract int compareTerm(Terms.Bucket other); - - public abstract long getDocCountError(); + int compareTerm(Terms.Bucket other); + long getDocCountError(); } /** * Return the sorted list of the buckets in this terms aggregation. */ @Override - List<Bucket> getBuckets(); + List<? extends Bucket> getBuckets(); /** * Get the bucket for the given term, or null if there is no such bucket. 
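
The `Terms` change above turns `Terms.Bucket` into an interface and widens `getBuckets()` to `List<? extends Bucket>`, so each concrete terms aggregation can return a list of its own bucket type directly instead of wrapping it in an unmodifiable `List<Terms.Bucket>`. The following self-contained sketch shows why the wildcard signature permits that covariant override; `Bucket`, `TermsLike`, `StringBucket`, and `StringTermsLike` are hypothetical stand-ins for the real aggregation types.

-------------------------------------------------
import java.util.Arrays;
import java.util.List;

// Minimal sketch of a covariant getBuckets() signature: the interface exposes
// List<? extends Bucket>, so an implementation may return a list of its own
// concrete bucket type without copying or wrapping.
interface Bucket {
    String getKeyAsString();
    long getDocCount();
}

interface TermsLike {
    List<? extends Bucket> getBuckets();
}

final class StringBucket implements Bucket {
    private final String key;
    private final long docCount;
    StringBucket(String key, long docCount) { this.key = key; this.docCount = docCount; }
    public String getKeyAsString() { return key; }
    public long getDocCount() { return docCount; }
}

final class StringTermsLike implements TermsLike {
    private final List<StringBucket> buckets;
    StringTermsLike(List<StringBucket> buckets) { this.buckets = buckets; }
    // Covariant override: List<StringBucket> satisfies List<? extends Bucket>.
    @Override public List<StringBucket> getBuckets() { return buckets; }
}

public final class TermsBucketsDemo {
    public static void main(String[] args) {
        TermsLike terms = new StringTermsLike(Arrays.asList(
            new StringBucket("error", 42), new StringBucket("warn", 7)));
        // Callers only read through the Bucket interface.
        for (Bucket b : terms.getBuckets()) {
            System.out.println(b.getKeyAsString() + " -> " + b.getDocCount());
        }
    }
}
-------------------------------------------------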
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java index bdc95b2e87e..40cbacd37e6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -127,7 +127,7 @@ public class UnmappedTerms extends InternalTerms<UnmappedTerms, UnmappedTerms.Bu } @Override - protected List<Bucket> getBucketsInternal() { + public List<Bucket> getBuckets() { return emptyList(); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesMethod.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesMethod.java index b10880a13c0..3b8085793dc 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesMethod.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesMethod.java @@ -53,20 +53,16 @@ public enum PercentilesMethod implements Writeable { } public static PercentilesMethod readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown PercentilesMethod ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(PercentilesMethod.class); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(ordinal()); + out.writeEnum(this); } @Override public String toString() { return parseField.getPreferredName(); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java index aa4168bd040..e84fcc1f51d 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java @@ -180,7 +180,7 @@ public final class InnerHitsContext { // Only include docs that have the current hit as parent .add(hitQuery, Occur.FILTER) // Only include docs that have this inner hits type - .add(documentMapper.typeFilter(), Occur.FILTER) + .add(documentMapper.typeFilter(context.getQueryShardContext()), Occur.FILTER) .build(); if (size() == 0) { final int count = context.searcher().count(q); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java index c7c9c547b51..5e49aa7395d 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java @@ -504,16 +504,12 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde NONE, SCORE; public static Order readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown Order ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(Order.class); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.ordinal()); + out.writeEnum(this); } public static Order fromString(String order) { @@ -533,16 
+529,12 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde CHARS, WORD, SENTENCE; public static BoundaryScannerType readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown BoundaryScannerType ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(BoundaryScannerType.class); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.ordinal()); + out.writeEnum(this); } public static BoundaryScannerType fromString(String boundaryScannerType) { diff --git a/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java b/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java index f053b3d48fe..46fba776277 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java +++ b/core/src/main/java/org/elasticsearch/search/internal/AliasFilter.java @@ -44,6 +44,8 @@ public final class AliasFilter implements Writeable { private final QueryBuilder filter; private final boolean reparseAliases; + public static final AliasFilter EMPTY = new AliasFilter(null, Strings.EMPTY_ARRAY); + public AliasFilter(QueryBuilder filter, String... aliases) { this.aliases = aliases == null ? Strings.EMPTY_ARRAY : aliases; this.filter = filter; diff --git a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java index 70718b56c0c..51db82652fc 100644 --- a/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java +++ b/core/src/main/java/org/elasticsearch/search/rescore/QueryRescoreMode.java @@ -86,16 +86,12 @@ public enum QueryRescoreMode implements Writeable { public abstract float combine(float primary, float secondary); public static QueryRescoreMode readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown ScoreMode ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(QueryRescoreMode.class); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.ordinal()); + out.writeEnum(this); } public static QueryRescoreMode fromString(String scoreMode) { @@ -111,4 +107,4 @@ public enum QueryRescoreMode implements Writeable { public String toString() { return name().toLowerCase(Locale.ROOT); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 2901e05f051..b136dd77989 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -350,18 +350,14 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> { @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeVInt(ordinal()); + out.writeEnum(this); } /** * Read from a stream. 
*/ static ScriptSortType readFromStream(final StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown ScriptSortType ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(ScriptSortType.class); } public static ScriptSortType fromString(final String str) { diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortMode.java b/core/src/main/java/org/elasticsearch/search/sort/SortMode.java index 21495798a89..07b5bfa98c2 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortMode.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortMode.java @@ -52,15 +52,11 @@ public enum SortMode implements Writeable { @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeVInt(ordinal()); + out.writeEnum(this); } public static SortMode readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown SortMode ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(SortMode.class); } public static SortMode fromString(final String str) { @@ -85,4 +81,4 @@ public enum SortMode implements Writeable { public String toString() { return name().toLowerCase(Locale.ROOT); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java b/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java index cd0a3bb6d46..fbcb7b4288e 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/SortOrder.java @@ -52,16 +52,12 @@ public enum SortOrder implements Writeable { }; static SortOrder readFromStream(StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown SortOrder ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(SortOrder.class); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(this.ordinal()); + out.writeEnum(this); } public static SortOrder fromString(String op) { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SortBy.java b/core/src/main/java/org/elasticsearch/search/suggest/SortBy.java index 3cd19c5c2fb..328fc4e8218 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/SortBy.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/SortBy.java @@ -38,15 +38,11 @@ public enum SortBy implements Writeable { @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeVInt(ordinal()); + out.writeEnum(this); } public static SortBy readFromStream(final StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown SortBy ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(SortBy.class); } public static SortBy resolve(final String str) { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java index 72fd41dc5b4..f701ff36426 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java @@ -511,15 +511,11 @@ 
public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuild @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeVInt(ordinal()); + out.writeEnum(this); } public static SuggestMode readFromStream(final StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown SuggestMode ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(SuggestMode.class); } public static SuggestMode resolve(final String str) { @@ -571,15 +567,11 @@ public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuild @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeVInt(ordinal()); + out.writeEnum(this); } public static StringDistanceImpl readFromStream(final StreamInput in) throws IOException { - int ordinal = in.readVInt(); - if (ordinal < 0 || ordinal >= values().length) { - throw new IOException("Unknown StringDistanceImpl ordinal [" + ordinal + "]"); - } - return values()[ordinal]; + return in.readEnum(StringDistanceImpl.class); } public static StringDistanceImpl resolve(final String str) { diff --git a/core/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/core/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 42ab7315234..b07c7fe3605 100644 --- a/core/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/core/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -159,4 +159,8 @@ public abstract class RemoteClusterAware extends AbstractComponent { throw new IllegalArgumentException("port must be a number", e); } } + + public static final String buildRemoteIndexName(String clusterAlias, String indexName) { + return clusterAlias + REMOTE_CLUSTER_INDEX_SEPARATOR + indexName; + } } diff --git a/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 5c7e072f650..e4f0d0d4af7 100644 --- a/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -152,7 +152,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo /** * Fetches all shards for the search request from this remote connection. This is used to later run the search on the remote end. */ - public void fetchSearchShards(SearchRequest searchRequest, final String[] indices, + public void fetchSearchShards(ClusterSearchShardsRequest searchRequest, ActionListener<ClusterSearchShardsResponse> listener) { if (connectedNodes.isEmpty()) { // just in case if we are not connected for some reason we try to connect and if we fail we have to notify the listener @@ -160,18 +160,15 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo // we can't proceed with a search on a cluster level. // in the future we might want to just skip the remote nodes in such a case but that can already be implemented on the caller // end since they provide the listener. 
- connectHandler.connect(ActionListener.wrap((x) -> fetchShardsInternal(searchRequest, indices, listener), listener::onFailure)); + connectHandler.connect(ActionListener.wrap((x) -> fetchShardsInternal(searchRequest, listener), listener::onFailure)); } else { - fetchShardsInternal(searchRequest, indices, listener); + fetchShardsInternal(searchRequest, listener); } } - private void fetchShardsInternal(SearchRequest searchRequest, String[] indices, + private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest, final ActionListener<ClusterSearchShardsResponse> listener) { final DiscoveryNode node = nodeSupplier.get(); - ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices) - .indicesOptions(searchRequest.indicesOptions()).local(true).preference(searchRequest.preference()) - .routing(searchRequest.routing()); transportService.sendRequest(node, ClusterSearchShardsAction.NAME, searchShardsRequest, new TransportResponseHandler<ClusterSearchShardsResponse>() { @@ -224,7 +221,13 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo }; } - @Override + Transport.Connection getConnection() { + DiscoveryNode discoveryNode = nodeSupplier.get(); + return transportService.getConnection(discoveryNode); + } + + + @Override public void close() throws IOException { connectHandler.close(); } diff --git a/core/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/core/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 92dce9d53f1..91a5cebbcd2 100644 --- a/core/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/core/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -24,10 +24,12 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchShardIterator; import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Booleans; @@ -176,6 +178,25 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl return remoteClusters.get(remoteCluster).isNodeConnected(node); } + public Map<String, OriginalIndices> groupIndices(IndicesOptions indicesOptions, String[] indices, Predicate<String> indexExists) { + Map<String, OriginalIndices> originalIndicesMap = new HashMap<>(); + if (isCrossClusterSearchEnabled()) { + final Map<String, List<String>> groupedIndices = groupClusterIndices(indices, indexExists); + for (Map.Entry<String, List<String>> entry : groupedIndices.entrySet()) { + String clusterAlias = entry.getKey(); + List<String> originalIndices = entry.getValue(); + originalIndicesMap.put(clusterAlias, + new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), indicesOptions)); + } + if (originalIndicesMap.containsKey(LOCAL_CLUSTER_GROUP_KEY) == false) { + originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions)); + } + } else { + 
originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(indices, indicesOptions)); + } + return originalIndicesMap; + } + /** * Returns <code>true</code> iff the given cluster is configured as a remote cluster. Otherwise <code>false</code> */ @@ -183,8 +204,9 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl return remoteClusters.containsKey(clusterName); } - public void collectSearchShards(SearchRequest searchRequest, Map<String, OriginalIndices> remoteIndicesByCluster, - ActionListener<Map<String, ClusterSearchShardsResponse>> listener) { + public void collectSearchShards(IndicesOptions indicesOptions, String preference, String routing, + Map<String, OriginalIndices> remoteIndicesByCluster, + ActionListener<Map<String, ClusterSearchShardsResponse>> listener) { final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); final Map<String, ClusterSearchShardsResponse> searchShardsResponses = new ConcurrentHashMap<>(); final AtomicReference<TransportException> transportException = new AtomicReference<>(); @@ -195,7 +217,10 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl throw new IllegalArgumentException("no such remote cluster: " + clusterName); } final String[] indices = entry.getValue().indices(); - remoteClusterConnection.fetchSearchShards(searchRequest, indices, + ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices) + .indicesOptions(indicesOptions).local(true).preference(preference) + .routing(routing); + remoteClusterConnection.fetchSearchShards(searchShardsRequest, new ActionListener<ClusterSearchShardsResponse>() { @Override public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { @@ -240,6 +265,14 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl return connection.getConnection(node); } + public Transport.Connection getConnection(String cluster) { + RemoteClusterConnection connection = remoteClusters.get(cluster); + if (connection == null) { + throw new IllegalArgumentException("no such remote cluster: " + cluster); + } + return connection.getConnection(); + } + @Override protected Set<String> getRemoteClusterNames() { return this.remoteClusters.keySet(); diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index 6d28944680e..7b1dcd788c2 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -120,6 +120,7 @@ grant { permission java.io.FilePermission "/proc/sys/vm/max_map_count", "read"; // io stats on Linux + permission java.io.FilePermission "/proc/self/mountinfo", "read"; permission java.io.FilePermission "/proc/diskstats", "read"; // control group stats on Linux diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 0b6b14684f9..9591e31b2b6 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -183,7 +183,6 @@ public class NodeStatsTests extends ESTestCase { assertEquals(fs.getTotal().getFree(), deserializedFs.getTotal().getFree()); assertEquals(fs.getTotal().getMount(), 
deserializedFs.getTotal().getMount()); assertEquals(fs.getTotal().getPath(), deserializedFs.getTotal().getPath()); - assertEquals(fs.getTotal().getSpins(), deserializedFs.getTotal().getSpins()); assertEquals(fs.getTotal().getType(), deserializedFs.getTotal().getType()); FsInfo.IoStats ioStats = fs.getIoStats(); FsInfo.IoStats deserializedIoStats = deserializedFs.getIoStats(); diff --git a/core/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/core/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index f34f4313fd6..7491eda8fd7 100644 --- a/core/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -64,7 +64,6 @@ public class TransportSearchActionTests extends ESTestCase { ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } - public void testMergeShardsIterators() throws IOException { List<ShardIterator> localShardIterators = new ArrayList<>(); { @@ -159,7 +158,8 @@ public class TransportSearchActionTests extends ESTestCase { new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT) }; Map<String, AliasFilter> indicesAndAliases = new HashMap<>(); - indicesAndAliases.put("foo", new AliasFilter(new TermsQueryBuilder("foo", "bar"), Strings.EMPTY_ARRAY)); + indicesAndAliases.put("foo", new AliasFilter(new TermsQueryBuilder("foo", "bar"), "some_alias_for_foo", + "some_other_foo_alias")); indicesAndAliases.put("bar", new AliasFilter(new MatchAllQueryBuilder(), Strings.EMPTY_ARRAY)); ClusterSearchShardsGroup[] groups = new ClusterSearchShardsGroup[] { new ClusterSearchShardsGroup(new ShardId("foo", "foo_id", 0), @@ -180,7 +180,9 @@ public class TransportSearchActionTests extends ESTestCase { new ClusterSearchShardsGroup(new ShardId("xyz", "xyz_id", 0), new ShardRouting[] {TestShardRouting.newShardRouting("xyz", 0, "node3", true, ShardRoutingState.STARTED)}) }; - searchShardsResponseMap.put("test_cluster_2", new ClusterSearchShardsResponse(groups2, nodes2, null)); + Map<String, AliasFilter> filter = new HashMap<>(); + filter.put("xyz", new AliasFilter(null, "some_alias_for_xyz")); + searchShardsResponseMap.put("test_cluster_2", new ClusterSearchShardsResponse(groups2, nodes2, filter)); Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>(); remoteIndicesByCluster.put("test_cluster_1", @@ -193,7 +195,8 @@ public class TransportSearchActionTests extends ESTestCase { assertEquals(4, iteratorList.size()); for (SearchShardIterator iterator : iteratorList) { if (iterator.shardId().getIndexName().endsWith("foo")) { - assertArrayEquals(new String[]{"fo*", "ba*"}, iterator.getOriginalIndices().indices()); + assertArrayEquals(new String[]{"some_alias_for_foo", "some_other_foo_alias"}, + iterator.getOriginalIndices().indices()); assertTrue(iterator.shardId().getId() == 0 || iterator.shardId().getId() == 1); assertEquals("test_cluster_1:foo", iterator.shardId().getIndexName()); ShardRouting shardRouting = iterator.nextOrNull(); @@ -204,7 +207,7 @@ public class TransportSearchActionTests extends ESTestCase { assertEquals(shardRouting.getIndexName(), "foo"); assertNull(iterator.nextOrNull()); } else if (iterator.shardId().getIndexName().endsWith("bar")) { - assertArrayEquals(new String[]{"fo*", "ba*"}, iterator.getOriginalIndices().indices()); + assertArrayEquals(new String[]{"bar"}, iterator.getOriginalIndices().indices()); assertEquals(0, iterator.shardId().getId()); assertEquals("test_cluster_1:bar", 
iterator.shardId().getIndexName()); ShardRouting shardRouting = iterator.nextOrNull(); @@ -215,7 +218,7 @@ public class TransportSearchActionTests extends ESTestCase { assertEquals(shardRouting.getIndexName(), "bar"); assertNull(iterator.nextOrNull()); } else if (iterator.shardId().getIndexName().endsWith("xyz")) { - assertArrayEquals(new String[]{"x*"}, iterator.getOriginalIndices().indices()); + assertArrayEquals(new String[]{"some_alias_for_xyz"}, iterator.getOriginalIndices().indices()); assertEquals(0, iterator.shardId().getId()); assertEquals("test_cluster_2:xyz", iterator.shardId().getIndexName()); ShardRouting shardRouting = iterator.nextOrNull(); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 9e45b0d3828..67d04c3c235 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -57,6 +57,7 @@ import java.util.Map; import java.util.function.Supplier; public class ClusterModuleTests extends ModuleTestCase { + private ClusterInfoService clusterInfoService = EmptyClusterInfoService.INSTANCE; private ClusterService clusterService = new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null); static class FakeAllocationDecider extends AllocationDecider { @@ -114,7 +115,7 @@ public class ClusterModuleTests extends ModuleTestCase { public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new EnableAllocationDecider(settings, clusterSettings)); } - }))); + }), clusterInfoService)); assertEquals(e.getMessage(), "Cannot specify allocation decider [" + EnableAllocationDecider.class.getName() + "] twice"); } @@ -126,8 +127,8 @@ public class ClusterModuleTests extends ModuleTestCase { public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { return Collections.singletonList(new FakeAllocationDecider(settings)); } - })); - assertTrue(module.allocationDeciders.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class))); + }), clusterInfoService); + assertTrue(module.deciderList.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class))); } private ClusterModule newClusterModuleWithShardsAllocator(Settings settings, String name, Supplier<ShardsAllocator> supplier) { @@ -138,7 +139,7 @@ public class ClusterModuleTests extends ModuleTestCase { return Collections.singletonMap(name, supplier); } } - )); + ), clusterInfoService); } public void testRegisterShardsAllocator() { @@ -156,7 +157,7 @@ public class ClusterModuleTests extends ModuleTestCase { public void testUnknownShardsAllocator() { Settings settings = Settings.builder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), "dne").build(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - new ClusterModule(settings, clusterService, Collections.emptyList())); + new ClusterModule(settings, clusterService, Collections.emptyList(), clusterInfoService)); assertEquals("Unknown ShardsAllocator [dne]", e.getMessage()); } diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index b68f3735c0a..7d3ca04e5a8 100644 --- 
a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.test.ESTestCase; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.function.Predicate; import static org.elasticsearch.common.util.set.Sets.newHashSet; import static org.hamcrest.Matchers.arrayContaining; @@ -956,4 +957,17 @@ public class IndexNameExpressionResolverTests extends ESTestCase { strings = indexNameExpressionResolver.filteringAliases(state, "test-0", "test-*,alias-*"); assertNull(strings); } + + public void testIndexAliases() { + MetaData.Builder mdBuilder = MetaData.builder() + .put(indexBuilder("test-0").state(State.OPEN) + .putAlias(AliasMetaData.builder("test-alias-0").filter("{ \"term\": \"foo\"}")) + .putAlias(AliasMetaData.builder("test-alias-1").filter("{ \"term\": \"foo\"}")) + .putAlias(AliasMetaData.builder("test-alias-non-filtering")) + ); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + String[] strings = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, true, "test-*"); + Arrays.sort(strings); + assertArrayEquals(new String[] {"test-alias-0", "test-alias-1", "test-alias-non-filtering"}, strings); + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index cfca80b78ca..5e400d95e4b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -391,7 +391,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { private class NoopGatewayAllocator extends GatewayAllocator { NoopGatewayAllocator() { - super(Settings.EMPTY, null, null); + super(Settings.EMPTY); } @Override diff --git a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index a8b065343c9..b67000e2b23 100644 --- a/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/core/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -812,4 +812,34 @@ public class BytesStreamsTests extends ESTestCase { StreamInput in = new BytesArray(Base64.getDecoder().decode("////////////AQAAAAAAAA==")).streamInput(); assertEquals(-1, in.readVLong()); } + + public enum TestEnum { + ONE, + TWO, + THREE + } + + public void testEnum() throws IOException { + TestEnum value = randomFrom(TestEnum.values()); + BytesStreamOutput output = new BytesStreamOutput(); + output.writeEnum(value); + StreamInput input = output.bytes().streamInput(); + assertEquals(value, input.readEnum(TestEnum.class)); + assertEquals(0, input.available()); + } + + public void testInvalidEnum() throws IOException { + BytesStreamOutput output = new BytesStreamOutput(); + int randomNumber = randomInt(); + boolean validEnum = randomNumber >= 0 && randomNumber < TestEnum.values().length; + output.writeVInt(randomNumber); + StreamInput input = output.bytes().streamInput(); + if (validEnum) { + assertEquals(TestEnum.values()[randomNumber], input.readEnum(TestEnum.class)); + } else { + IOException ex = expectThrows(IOException.class, () -> 
input.readEnum(TestEnum.class)); + assertEquals("Unknown TestEnum ordinal [" + randomNumber + "]", ex.getMessage()); + } + assertEquals(0, input.available()); + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 9460261e547..39a9dbff959 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.discovery; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -71,7 +72,8 @@ public class DiscoveryModuleTests extends ESTestCase { default Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider) { + ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, + AllocationService allocationService) { return impl(); } } @@ -93,7 +95,7 @@ public class DiscoveryModuleTests extends ESTestCase { private DiscoveryModule newModule(Settings settings, List<DiscoveryPlugin> plugins) { return new DiscoveryModule(settings, threadPool, transportService, namedWriteableRegistry, null, masterService, - clusterApplier, clusterSettings, plugins); + clusterApplier, clusterSettings, plugins, null); } public void testDefaults() { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index 0d0d391663d..d0c138954ab 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -299,7 +299,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), masterService, (source, clusterStateSupplier, listener) -> listener.clusterStateProcessed(source, clusterStateSupplier.get(), clusterStateSupplier.get()), - clusterSettings, Collections::emptyList); + clusterSettings, Collections::emptyList, null); zenDiscovery.start(); return zenDiscovery; } diff --git a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java index 385770426f5..c202db1470e 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -238,31 +238,43 @@ public class IndexServiceTests extends ESSingleNodeTestCase { } public void testRescheduleAsyncFsync() throws Exception { - Settings settings = Settings.builder() - .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "100ms") // very often :) - .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST) + final Settings settings = Settings.builder() + 
.put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST) .build(); - IndexService indexService = createIndex("test", settings); + final IndexService indexService = createIndex("test", settings); ensureGreen("test"); assertNull(indexService.getFsyncTask()); - IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)).build(); - indexService.updateMetaData(metaData); - assertNotNull(indexService.getFsyncTask()); - assertTrue(indexService.getRefreshTask().mustReschedule()); - client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); - IndexShard shard = indexService.getShard(0); - assertBusy(() -> { - assertFalse(shard.getTranslog().syncNeeded()); - }); - metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST)).build(); - indexService.updateMetaData(metaData); + client() + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)) + .get(); + + assertNotNull(indexService.getFsyncTask()); + assertTrue(indexService.getFsyncTask().mustReschedule()); + client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + assertNotNull(indexService.getFsyncTask()); + final IndexShard shard = indexService.getShard(0); + assertBusy(() -> assertFalse(shard.getTranslog().syncNeeded())); + + client() + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST)) + .get(); assertNull(indexService.getFsyncTask()); - metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)).build(); - indexService.updateMetaData(metaData); + client() + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)) + .get(); assertNotNull(indexService.getFsyncTask()); - } public void testIllegalFsyncInterval() { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index aa66526bf42..aedd332471a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -99,7 +99,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { .bytes(), XContentType.JSON)); - assertFieldNames(set("a", "a.keyword", "b", "b.c", "_uid", "_type", "_version", "_seq_no", "_primary_term", "_source"), doc); + assertFieldNames(set("a", "a.keyword", "b", "b.c", "_uid", "_version", "_seq_no", "_primary_term", "_source"), doc); } public void testExplicitEnabled() throws Exception { @@ -117,7 +117,7 @@ public class FieldNamesFieldMapperTests extends 
ESSingleNodeTestCase { .bytes(), XContentType.JSON)); - assertFieldNames(set("field", "field.keyword", "_uid", "_type", "_version", "_seq_no", "_primary_term", "_source"), doc); + assertFieldNames(set("field", "field.keyword", "_uid", "_version", "_seq_no", "_primary_term", "_source"), doc); } public void testDisabled() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 4d467d641d9..cb0b922e197 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -290,13 +290,13 @@ public class MapperServiceTests extends ESSingleNodeTestCase { public void testIndexSortWithNestedFields() throws IOException { Settings settings = Settings.builder() - .put("index.sort.field", "_type") + .put("index.sort.field", "foo") .build(); IllegalArgumentException invalidNestedException = expectThrows(IllegalArgumentException.class, - () -> createIndex("test", settings, "t", "nested_field", "type=nested")); + () -> createIndex("test", settings, "t", "nested_field", "type=nested", "foo", "type=keyword")); assertThat(invalidNestedException.getMessage(), containsString("cannot have nested fields when index sort is activated")); - IndexService indexService = createIndex("test", settings, "t"); + IndexService indexService = createIndex("test", settings, "t", "foo", "type=keyword"); CompressedXContent nestedFieldMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject() .startObject("properties") .startObject("nested_field") @@ -310,7 +310,6 @@ public class MapperServiceTests extends ESSingleNodeTestCase { containsString("cannot have nested fields when index sort is activated")); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/pull/24317#issuecomment-297624290") public void testForbidMultipleTypes() throws IOException { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); MapperService mapperService = createIndex("test").mapperService(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index 18a771bb467..a6fbfc44a56 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -18,14 +18,17 @@ */ package org.elasticsearch.index.mapper; +import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.net.InetAddress; import java.util.Arrays; import java.util.HashSet; import java.util.Locale; @@ -40,6 +43,8 @@ import static org.hamcrest.Matchers.containsString; public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { private static String FROM_DATE = "2016-10-31"; private static String TO_DATE = "2016-11-01 20:00:00"; + private static String FROM_IP = "::ffff:c0a8:107"; + private static String TO_IP = "2001:db8::"; private 
static int FROM = 5; private static String FROM_STR = FROM + ""; private static int TO = 10; @@ -48,12 +53,14 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { @Override protected void setTypeList() { - TYPES = new HashSet<>(Arrays.asList("date_range", "float_range", "double_range", "integer_range", "long_range")); + TYPES = new HashSet<>(Arrays.asList("date_range", "ip_range", "float_range", "double_range", "integer_range", "long_range")); } private Object getFrom(String type) { if (type.equals("date_range")) { return FROM_DATE; + } else if (type.equals("ip_range")) { + return FROM_IP; } return random().nextBoolean() ? FROM : FROM_STR; } @@ -69,13 +76,17 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { private Object getTo(String type) { if (type.equals("date_range")) { return TO_DATE; + } else if (type.equals("ip_range")) { + return TO_IP; } return random().nextBoolean() ? TO : TO_STR; } - private Number getMax(String type) { + private Object getMax(String type) { if (type.equals("date_range") || type.equals("long_range")) { return Long.MAX_VALUE; + } else if (type.equals("ip_range")) { + return InetAddressPoint.MAX_VALUE; } else if (type.equals("integer_range")) { return Integer.MAX_VALUE; } else if (type.equals("float_range")) { @@ -189,7 +200,14 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { assertEquals(2, pointField.fieldType().pointDimensionCount()); IndexableField storedField = fields[1]; assertTrue(storedField.fieldType().stored()); - assertThat(storedField.stringValue(), containsString(type.equals("date_range") ? "1477872000000" : "5")); + String strVal = "5"; + if (type.equals("date_range")) { + strVal = "1477872000000"; + } else if (type.equals("ip_range")) { + strVal = InetAddresses.toAddrString(InetAddresses.forString("192.168.1.7")) + " : " + + InetAddresses.toAddrString(InetAddresses.forString("2001:db8:0:0:0:0:0:0")); + } + assertThat(storedField.stringValue(), containsString(strVal)); } @Override @@ -234,7 +252,8 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { .endObject().bytes(), XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); - assertThat(e.getCause().getMessage(), anyOf(containsString("passed as String"), containsString("failed to parse date"))); + assertThat(e.getCause().getMessage(), anyOf(containsString("passed as String"), + containsString("failed to parse date"), containsString("is not an IP string literal"))); } @Override @@ -261,7 +280,8 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { assertEquals(2, doc.rootDoc().getFields("field").length); IndexableField[] fields = doc.rootDoc().getFields("field"); IndexableField storedField = fields[1]; - assertThat(storedField.stringValue(), containsString(type.equals("date_range") ? Long.MAX_VALUE+"" : getMax(type)+"")); + String expected = type.equals("ip_range") ? 
InetAddresses.toAddrString((InetAddress)getMax(type)) : getMax(type) +""; + assertThat(storedField.stringValue(), containsString(expected)); // test null max value doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() @@ -280,8 +300,14 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { assertFalse(pointField.fieldType().stored()); storedField = fields[1]; assertTrue(storedField.fieldType().stored()); - assertThat(storedField.stringValue(), containsString(type.equals("date_range") ? "1477872000000" : "5")); - assertThat(storedField.stringValue(), containsString(getMax(type) + "")); + String strVal = "5"; + if (type.equals("date_range")) { + strVal = "1477872000000"; + } else if (type.equals("ip_range")) { + strVal = InetAddresses.toAddrString(InetAddresses.forString("192.168.1.7")) + " : " + + InetAddresses.toAddrString(InetAddresses.forString("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")); + } + assertThat(storedField.stringValue(), containsString(strVal)); } public void testNoBounds() throws Exception { @@ -316,8 +342,8 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase { assertFalse(pointField.fieldType().stored()); IndexableField storedField = fields[1]; assertTrue(storedField.fieldType().stored()); - assertThat(storedField.stringValue(), containsString(type.equals("date_range") ? Long.MAX_VALUE+"" : getMax(type)+"")); - assertThat(storedField.stringValue(), containsString(getMax(type) + "")); + String expected = type.equals("ip_range") ? InetAddresses.toAddrString((InetAddress)getMax(type)) : getMax(type) +""; + assertThat(storedField.stringValue(), containsString(expected)); } public void testIllegalArguments() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java index b3d7db23c38..015509d4a73 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java @@ -21,6 +21,8 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.document.DoubleRange; import org.apache.lucene.document.FloatRange; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.InetAddressRange; import org.apache.lucene.document.IntRange; import org.apache.lucene.document.LongRange; import org.apache.lucene.index.IndexOptions; @@ -37,6 +39,7 @@ import org.elasticsearch.test.IndexSettingsModule; import org.joda.time.DateTime; import org.junit.Before; +import java.net.InetAddress; import java.util.Locale; public class RangeFieldTypeTests extends FieldTypeTestCase { @@ -100,6 +103,8 @@ public class RangeFieldTypeTests extends FieldTypeTestCase { return getLongRangeQuery(relation, (long)from, (long)to, includeLower, includeUpper); case DOUBLE: return getDoubleRangeQuery(relation, (double)from, (double)to, includeLower, includeUpper); + case IP: + return getInetAddressRangeQuery(relation, (InetAddress)from, (InetAddress)to, includeLower, includeUpper); default: return getFloatRangeQuery(relation, (float)from, (float)to, includeLower, includeUpper); } @@ -142,7 +147,8 @@ public class RangeFieldTypeTests extends FieldTypeTestCase { return FloatRange.newIntersectsQuery(FIELDNAME, lower, upper); } - private Query getDoubleRangeQuery(ShapeRelation relation, double from, double to, boolean 
includeLower, boolean includeUpper) { + private Query getDoubleRangeQuery(ShapeRelation relation, double from, double to, boolean includeLower, + boolean includeUpper) { double[] lower = new double[] {includeLower ? from : Math.nextUp(from)}; double[] upper = new double[] {includeUpper ? to : Math.nextDown(to)}; if (relation == ShapeRelation.WITHIN) { @@ -153,7 +159,19 @@ public class RangeFieldTypeTests extends FieldTypeTestCase { return DoubleRange.newIntersectsQuery(FIELDNAME, lower, upper); } - private Object nextFrom() { + private Query getInetAddressRangeQuery(ShapeRelation relation, InetAddress from, InetAddress to, boolean includeLower, + boolean includeUpper) { + InetAddress lower = includeLower ? from : InetAddressPoint.nextUp(from); + InetAddress upper = includeUpper ? to : InetAddressPoint.nextDown(to); + if (relation == ShapeRelation.WITHIN) { + return InetAddressRange.newWithinQuery(FIELDNAME, lower, upper); + } else if (relation == ShapeRelation.CONTAINS) { + return InetAddressRange.newContainsQuery(FIELDNAME, lower, upper); + } + return InetAddressRange.newIntersectsQuery(FIELDNAME, lower, upper); + } + + private Object nextFrom() throws Exception { switch (type) { case INTEGER: return (int)(random().nextInt() * 0.5 - DISTANCE); @@ -163,12 +181,14 @@ public class RangeFieldTypeTests extends FieldTypeTestCase { return (long)(random().nextLong() * 0.5 - DISTANCE); case FLOAT: return (float)(random().nextFloat() * 0.5 - DISTANCE); + case IP: + return InetAddress.getByName("::ffff:c0a8:107"); default: return random().nextDouble() * 0.5 - DISTANCE; } } - private Object nextTo(Object from) { + private Object nextTo(Object from) throws Exception { switch (type) { case INTEGER: return (Integer)from + DISTANCE; @@ -178,6 +198,8 @@ public class RangeFieldTypeTests extends FieldTypeTestCase { return (Long)from + DISTANCE; case DOUBLE: return (Double)from + DISTANCE; + case IP: + return InetAddress.getByName("2001:db8::"); default: return (Float)from + DISTANCE; } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java index 6fade26ca02..d3091ac3459 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldMapperTests.java @@ -19,16 +19,31 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; +import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; +import org.elasticsearch.index.mapper.MapperService.MergeReason; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.plugins.Plugin; import 
org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import java.io.IOException; +import java.util.Arrays; import java.util.Collection; - -import static org.hamcrest.Matchers.instanceOf; +import java.util.Collections; public class TypeFieldMapperTests extends ESSingleNodeTestCase { @@ -37,13 +52,60 @@ public class TypeFieldMapperTests extends ESSingleNodeTestCase { return pluginList(InternalSettingsPlugin.class); } - public void testDocValues() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); - - DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - TypeFieldMapper typeMapper = docMapper.metadataMapper(TypeFieldMapper.class); - assertTrue(typeMapper.fieldType().hasDocValues()); - assertThat(typeMapper.fieldType().fielddataBuilder(), instanceOf(DocValuesIndexFieldData.Builder.class)); + public void testDocValuesMultipleTypes() throws Exception { + testDocValues(false); } + public void testDocValuesSingleType() throws Exception { + testDocValues(true); + } + + public void testDocValues(boolean singleType) throws IOException { + Settings indexSettings = Settings.builder() + .put("index.mapping.single_type", singleType) + .build(); + MapperService mapperService = createIndex("test", indexSettings).mapperService(); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); + + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); + w.addDocument(document.rootDoc()); + DirectoryReader r = DirectoryReader.open(w); + w.close(); + + MappedFieldType ft = mapperService.fullName(TypeFieldMapper.NAME); + IndexOrdinalsFieldData fd = (IndexOrdinalsFieldData) ft.fielddataBuilder().build(mapperService.getIndexSettings(), + ft, new IndexFieldDataCache.None(), new NoneCircuitBreakerService(), mapperService); + AtomicOrdinalsFieldData afd = fd.load(r.leaves().get(0)); + SortedSetDocValues values = afd.getOrdinalsValues(); + assertTrue(values.advanceExact(0)); + assertEquals(0, values.nextOrd()); + assertEquals(SortedSetDocValues.NO_MORE_ORDS, values.nextOrd()); + assertEquals(new BytesRef("type"), values.lookupOrd(0)); + r.close(); + dir.close(); + } + + public void testDefaultsMultipleTypes() throws IOException { + Settings indexSettings = Settings.builder() + .put("index.mapping.single_type", false) + .build(); + MapperService mapperService = createIndex("test", indexSettings).mapperService(); + DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); + IndexableField[] fields = document.rootDoc().getFields(TypeFieldMapper.NAME); + assertEquals(IndexOptions.DOCS, fields[0].fieldType().indexOptions()); + assertEquals(DocValuesType.SORTED_SET, fields[1].fieldType().docValuesType()); + } + + public void testDefaultsSingleType() throws IOException { + Settings indexSettings = Settings.builder() + .put("index.mapping.single_type", true) + .build(); + MapperService mapperService = createIndex("test", indexSettings).mapperService(); + DocumentMapper mapper = 
mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false); + ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON)); + assertEquals(Collections.<IndexableField>emptyList(), Arrays.asList(document.rootDoc().getFields(TypeFieldMapper.NAME))); + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java index 3c80f095f83..b8a2805efe9 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/TypeFieldTypeTests.java @@ -30,15 +30,25 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; -import org.junit.Before; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.query.QueryShardContext; +import org.mockito.Mockito; import java.io.IOException; +import java.util.Collections; +import java.util.Set; public class TypeFieldTypeTests extends FieldTypeTestCase { @Override @@ -46,25 +56,62 @@ public class TypeFieldTypeTests extends FieldTypeTestCase { return new TypeFieldMapper.TypeFieldType(); } - @Before - public void setupProperties() { - addModifier(new Modifier("fielddata", true) { - @Override - public void modify(MappedFieldType ft) { - TypeFieldMapper.TypeFieldType tft = (TypeFieldMapper.TypeFieldType) ft; - tft.setFielddata(tft.fielddata() == false); - } - }); + public void testTermsQueryWhenTypesAreDisabled() throws Exception { + QueryShardContext context = Mockito.mock(QueryShardContext.class); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put("index.mapping.single_type", true).build(); + IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); + IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); + Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); + + MapperService mapperService = Mockito.mock(MapperService.class); + Set<String> types = Collections.emptySet(); + Mockito.when(mapperService.types()).thenReturn(types); + Mockito.when(context.getMapperService()).thenReturn(mapperService); + + TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType(); + ft.setName(TypeFieldMapper.NAME); + Query query = ft.termQuery("my_type", context); + assertEquals(new MatchNoDocsQuery(), query); + + types = Collections.singleton("my_type"); + Mockito.when(mapperService.types()).thenReturn(types); + query = ft.termQuery("my_type", context); + assertEquals(new 
MatchAllDocsQuery(), query); + + Mockito.when(mapperService.hasNested()).thenReturn(true); + query = ft.termQuery("my_type", context); + assertEquals(Queries.newNonNestedFilter(), query); + + types = Collections.singleton("other_type"); + Mockito.when(mapperService.types()).thenReturn(types); + query = ft.termQuery("my_type", context); + assertEquals(new MatchNoDocsQuery(), query); } - public void testTermsQuery() throws Exception { + public void testTermsQueryWhenTypesAreEnabled() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig()); IndexReader reader = openReaderWithNewType("my_type", w); + QueryShardContext context = Mockito.mock(QueryShardContext.class); + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put("index.mapping.single_type", false).build(); + IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); + IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); + Mockito.when(context.getIndexSettings()).thenReturn(mockSettings); + TypeFieldMapper.TypeFieldType ft = new TypeFieldMapper.TypeFieldType(); ft.setName(TypeFieldMapper.NAME); - Query query = ft.termQuery("my_type", null); + Query query = ft.termQuery("my_type", context); assertEquals(new MatchAllDocsQuery(), query.rewrite(reader)); // Make sure that Lucene actually simplifies the query when there is a single type diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index a21f7757eac..f0f38ba48c4 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -240,6 +240,12 @@ public class IndexRecoveryIT extends ESIntegTestCase { validateIndexRecoveryState(nodeBRecoveryState.getIndex()); } + @TestLogging( + "_root:DEBUG," + + "org.elasticsearch.cluster.service:TRACE," + + "org.elasticsearch.indices.cluster:TRACE," + + "org.elasticsearch.indices.recovery:TRACE," + + "org.elasticsearch.index.shard:TRACE") public void testRerouteRecovery() throws Exception { logger.info("--> start node A"); final String nodeA = internalCluster().startNode(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/CombiIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/CombiIT.java index 7517e98ff27..c38552ae254 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/CombiIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/CombiIT.java @@ -30,7 +30,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; -import java.util.Collection; +import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; @@ -95,7 +95,7 @@ public class CombiIT extends ESIntegTestCase { Terms terms = aggs.get("values"); assertNotNull(terms); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(values.size())); for (Terms.Bucket bucket : buckets) { values.remove(((Number) bucket.getKey()).intValue()); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/EquivalenceIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/EquivalenceIT.java index 00f82dfbe44..6c08c169724 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/EquivalenceIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/EquivalenceIT.java @@ -485,10 +485,10 @@ public class EquivalenceIT extends ESIntegTestCase { } private void assertEquals(Terms t1, Terms t2) { - List<Terms.Bucket> t1Buckets = t1.getBuckets(); - List<Terms.Bucket> t2Buckets = t1.getBuckets(); + List<? extends Terms.Bucket> t1Buckets = t1.getBuckets(); + List<? extends Terms.Bucket> t2Buckets = t1.getBuckets(); assertEquals(t1Buckets.size(), t2Buckets.size()); - for (Iterator<Terms.Bucket> it1 = t1Buckets.iterator(), it2 = t2Buckets.iterator(); it1.hasNext(); ) { + for (Iterator<? extends Terms.Bucket> it1 = t1Buckets.iterator(), it2 = t2Buckets.iterator(); it1.hasNext(); ) { final Terms.Bucket b1 = it1.next(); final Terms.Bucket b2 = it2.next(); assertEquals(b1.getDocCount(), b2.getDocCount()); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java index 9af28806fef..c8803b7e790 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DiversifiedSamplerIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; +import java.util.List; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ -108,7 +109,7 @@ public class DiversifiedSamplerIT extends ESIntegTestCase { ).execute().actionGet(); assertSearchResponse(response); Terms genres = response.getAggregations().get("genres"); - Collection<Bucket> genreBuckets = genres.getBuckets(); + Collection<? extends Bucket> genreBuckets = genres.getBuckets(); // For this test to be useful we need >1 genre bucket to compare assertThat(genreBuckets.size(), greaterThan(1)); double lastMaxPrice = asc ? Double.MIN_VALUE : Double.MAX_VALUE; @@ -141,7 +142,7 @@ public class DiversifiedSamplerIT extends ESIntegTestCase { assertSearchResponse(response); Sampler sample = response.getAggregations().get("sample"); Terms authors = sample.getAggregations().get("authors"); - Collection<Bucket> testBuckets = authors.getBuckets(); + List<? extends Bucket> testBuckets = authors.getBuckets(); for (Terms.Bucket testBucket : testBuckets) { assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR)); @@ -162,11 +163,11 @@ public class DiversifiedSamplerIT extends ESIntegTestCase { .addAggregation(rootTerms).execute().actionGet(); assertSearchResponse(response); Terms genres = response.getAggregations().get("genres"); - Collection<Bucket> genreBuckets = genres.getBuckets(); + List<? 
extends Bucket> genreBuckets = genres.getBuckets(); for (Terms.Bucket genreBucket : genreBuckets) { Sampler sample = genreBucket.getAggregations().get("sample"); Terms authors = sample.getAggregations().get("authors"); - Collection<Bucket> testBuckets = authors.getBuckets(); + List<? extends Bucket> testBuckets = authors.getBuckets(); for (Terms.Bucket testBucket : testBuckets) { assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_AUTHOR)); @@ -195,7 +196,7 @@ public class DiversifiedSamplerIT extends ESIntegTestCase { Sampler sample = genreSample.getAggregations().get("sample"); Terms genres = sample.getAggregations().get("genres"); - Collection<Bucket> testBuckets = genres.getBuckets(); + List<? extends Bucket> testBuckets = genres.getBuckets(); for (Terms.Bucket testBucket : testBuckets) { assertThat(testBucket.getDocCount(), lessThanOrEqualTo((long) NUM_SHARDS * MAX_DOCS_PER_GENRE)); } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java index cf28541121c..ca106721fcc 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java @@ -842,7 +842,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { assertThat(tags.getName(), equalTo("num_tags")); assertThat(tags.getBuckets().size(), equalTo(2)); - Iterator<Terms.Bucket> iters = tags.getBuckets().iterator(); + Iterator<? extends Terms.Bucket> iters = tags.getBuckets().iterator(); Terms.Bucket tag = iters.next(); assertThat(tag, notNullValue()); @@ -883,7 +883,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase { assertThat(tags.getName(), equalTo("tags")); assertThat(tags.getBuckets().size(), equalTo(2)); - Iterator<Terms.Bucket> iters = tags.getBuckets().iterator(); + Iterator<? extends Terms.Bucket> iters = tags.getBuckets().iterator(); // the max for "1" is 2 // the max for "0" is 4 diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java index 6793768a91f..a54dc3e2f5e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/LongTermsIT.java @@ -854,7 +854,7 @@ public class LongTermsIT extends AbstractTermsTestCase { assertThat(tags.getName(), equalTo("num_tags")); assertThat(tags.getBuckets().size(), equalTo(2)); - Iterator<Terms.Bucket> iters = tags.getBuckets().iterator(); + Iterator<? extends Terms.Bucket> iters = tags.getBuckets().iterator(); Terms.Bucket tag = iters.next(); assertThat(tag, notNullValue()); @@ -893,7 +893,7 @@ public class LongTermsIT extends AbstractTermsTestCase { assertThat(tags.getName(), equalTo("tags")); assertThat(tags.getBuckets().size(), equalTo(2)); - Iterator<Terms.Bucket> iters = tags.getBuckets().iterator(); + Iterator<? 
extends Terms.Bucket> iters = tags.getBuckets().iterator(); // the max for "1" is 2 // the max for "0" is 4 diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index 7307b756e3f..e1e8f1ba660 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -161,8 +161,8 @@ public class MinDocCountIT extends AbstractTermsTestCase { // check that terms2 is a subset of terms1 private void assertSubset(Terms terms1, Terms terms2, long minDocCount, int size, String include) { final Matcher matcher = include == null ? null : Pattern.compile(include).matcher("");; - final Iterator<Terms.Bucket> it1 = terms1.getBuckets().iterator(); - final Iterator<Terms.Bucket> it2 = terms2.getBuckets().iterator(); + final Iterator<? extends Terms.Bucket> it1 = terms1.getBuckets().iterator(); + final Iterator<? extends Terms.Bucket> it2 = terms2.getBuckets().iterator(); int size2 = 0; while (it1.hasNext()) { final Terms.Bucket bucket1 = it1.next(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java index f87b98bc878..ebd078de674 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SamplerIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.test.ESIntegTestCase; import java.util.Collection; +import java.util.List; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ -104,7 +105,7 @@ public class SamplerIT extends ESIntegTestCase { ).execute().actionGet(); assertSearchResponse(response); Terms genres = response.getAggregations().get("genres"); - Collection<Bucket> genreBuckets = genres.getBuckets(); + List<? extends Bucket> genreBuckets = genres.getBuckets(); // For this test to be useful we need >1 genre bucket to compare assertThat(genreBuckets.size(), greaterThan(1)); double lastMaxPrice = asc ? Double.MIN_VALUE : Double.MAX_VALUE; @@ -130,7 +131,7 @@ public class SamplerIT extends ESIntegTestCase { assertSearchResponse(response); Sampler sample = response.getAggregations().get("sample"); Terms authors = sample.getAggregations().get("authors"); - Collection<Bucket> testBuckets = authors.getBuckets(); + List<? 
extends Bucket> testBuckets = authors.getBuckets(); long maxBooksPerAuthor = 0; for (Terms.Bucket testBucket : testBuckets) { diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java index ecc16b85f13..748c5f886f6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -22,8 +22,8 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -43,7 +43,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<String, Long> expected = new HashMap<>(); expected.put("1", 8L); @@ -66,7 +66,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<String, Long> expected = new HashMap<>(); expected.put("1", 8L); @@ -90,7 +90,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param) Map<String, Long> expected = new HashMap<>(); expected.put("1", 8L); @@ -114,7 +114,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param) Map<String, Long> expected = new HashMap<>(); expected.put("1", 5L); @@ -137,7 +137,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<String, Long> expected = new HashMap<>(); expected.put("1", 8L); @@ -160,7 +160,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<Integer, Long> expected = new HashMap<>(); expected.put(1, 8L); @@ -183,7 +183,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<Integer, Long> expected = new HashMap<>(); expected.put(1, 8L); @@ -206,7 +206,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param) Map<Integer, Long> expected = new HashMap<>(); expected.put(1, 8L); @@ -230,7 +230,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); // we still only return 3 entries (based on the 'size' param) Map<Integer, Long> expected = new HashMap<>(); expected.put(1, 5L); @@ -253,7 +253,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<Integer, Long> expected = new HashMap<>(); expected.put(1, 8L); @@ -276,7 +276,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<Integer, Long> expected = new HashMap<>(); expected.put(1, 8L); @@ -299,7 +299,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<Integer, Long> expected = new HashMap<>(); expected.put(1, 8L); @@ -322,7 +322,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<Integer, Long> expected = new HashMap<>(); expected.put(1, 8L); @@ -345,7 +345,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<Integer, Long> expected = new HashMap<>(); expected.put(1, 5L); @@ -368,7 +368,7 @@ public class ShardSizeTermsIT extends ShardSizeTestCase { .execute().actionGet(); Terms terms = response.getAggregations().get("keys"); - Collection<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(3)); Map<Integer, Long> expected = new HashMap<>(); expected.put(1, 8L); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 7fc20908e81..09cec9a458f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -438,7 +438,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase { assertSearchResponse(response); StringTerms classes = response.getAggregations().get("class"); assertThat(classes.getBuckets().size(), equalTo(2)); - Iterator<Terms.Bucket> classBuckets = classes.getBuckets().iterator(); + Iterator<? extends Terms.Bucket> classBuckets = classes.getBuckets().iterator(); Aggregations aggregations = classBuckets.next().getAggregations(); SignificantTerms sigTerms = aggregations.get("mySignificantTerms"); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java index b5dbcd9085a..df69cfcfa93 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/StringTermsIT.java @@ -1012,7 +1012,7 @@ public class StringTermsIT extends AbstractTermsTestCase { assertThat(tags.getName(), equalTo("tags")); assertThat(tags.getBuckets().size(), equalTo(2)); - Iterator<Terms.Bucket> iters = tags.getBuckets().iterator(); + Iterator<? extends Terms.Bucket> iters = tags.getBuckets().iterator(); Terms.Bucket tag = iters.next(); assertThat(tag, notNullValue()); @@ -1054,7 +1054,7 @@ public class StringTermsIT extends AbstractTermsTestCase { assertThat(tags.getName(), equalTo("tags")); assertThat(tags.getBuckets().size(), equalTo(2)); - Iterator<Terms.Bucket> iters = tags.getBuckets().iterator(); + Iterator<? extends Terms.Bucket> iters = tags.getBuckets().iterator(); // the max for "more" is 2 // the max for "less" is 4 @@ -1117,7 +1117,7 @@ public class StringTermsIT extends AbstractTermsTestCase { assertThat(tags.getName(), equalTo("tags")); assertThat(tags.getBuckets().size(), equalTo(2)); - Iterator<Terms.Bucket> iters = tags.getBuckets().iterator(); + Iterator<? extends Terms.Bucket> iters = tags.getBuckets().iterator(); // the max for "more" is 2 // the max for "less" is 4 @@ -1180,7 +1180,7 @@ public class StringTermsIT extends AbstractTermsTestCase { assertThat(tags.getName(), equalTo("tags")); assertThat(tags.getBuckets().size(), equalTo(2)); - Iterator<Terms.Bucket> iters = tags.getBuckets().iterator(); + Iterator<? 
extends Terms.Bucket> iters = tags.getBuckets().iterator(); // the max for "more" is 2 // the max for "less" is 4 diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 9d5ca3afc54..9ed32ca2e7b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -177,7 +177,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { assertThat(testTerms, notNullValue()); assertThat(testTerms.getName(), equalTo("terms")); assertThat(testTerms.getDocCountError(), greaterThanOrEqualTo(0L)); - Collection<Bucket> testBuckets = testTerms.getBuckets(); + List<? extends Bucket> testBuckets = testTerms.getBuckets(); assertThat(testBuckets.size(), lessThanOrEqualTo(size)); assertThat(accurateTerms.getBuckets().size(), greaterThanOrEqualTo(testBuckets.size())); @@ -211,7 +211,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { assertThat(testTerms, notNullValue()); assertThat(testTerms.getName(), equalTo("terms")); assertThat(testTerms.getDocCountError(), equalTo(0L)); - Collection<Bucket> testBuckets = testTerms.getBuckets(); + List<? extends Bucket> testBuckets = testTerms.getBuckets(); assertThat(testBuckets.size(), lessThanOrEqualTo(size)); assertThat(accurateTerms.getBuckets().size(), greaterThanOrEqualTo(testBuckets.size())); @@ -229,7 +229,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { assertThat(testTerms, notNullValue()); assertThat(testTerms.getName(), equalTo("terms")); assertThat(testTerms.getDocCountError(), equalTo(0L)); - Collection<Bucket> testBuckets = testTerms.getBuckets(); + List<? extends Bucket> testBuckets = testTerms.getBuckets(); assertThat(testBuckets.size(), lessThanOrEqualTo(size)); for (Terms.Bucket testBucket : testBuckets) { @@ -248,7 +248,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { assertThat(testTerms, notNullValue()); assertThat(testTerms.getName(), equalTo("terms")); assertThat(testTerms.getDocCountError(),anyOf(equalTo(-1L), equalTo(0L))); - Collection<Bucket> testBuckets = testTerms.getBuckets(); + List<? extends Bucket> testBuckets = testTerms.getBuckets(); assertThat(testBuckets.size(), lessThanOrEqualTo(size)); assertThat(accurateTerms.getBuckets().size(), greaterThanOrEqualTo(testBuckets.size())); @@ -988,7 +988,7 @@ public class TermsDocCountErrorIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getDocCountError(), equalTo(46L)); - List<Bucket> buckets = terms.getBuckets(); + List<? 
extends Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(5)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregatorTests.java index 47aa35bf924..17152bc450a 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregatorTests.java @@ -52,6 +52,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.metrics.min.InternalMin; import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; +import org.mockito.Mockito; import java.io.IOException; import java.util.Arrays; @@ -165,8 +166,8 @@ public class ParentToChildrenAggregatorTests extends AggregatorTestCase { when(mapperService.documentMapper(CHILD_TYPE)).thenReturn(childDocMapper); when(mapperService.documentMapper(PARENT_TYPE)).thenReturn(parentDocMapper); when(mapperService.docMappers(false)).thenReturn(Arrays.asList(new DocumentMapper[] { childDocMapper, parentDocMapper })); - when(parentDocMapper.typeFilter()).thenReturn(new TypeFieldMapper.TypesQuery(new BytesRef(PARENT_TYPE))); - when(childDocMapper.typeFilter()).thenReturn(new TypeFieldMapper.TypesQuery(new BytesRef(CHILD_TYPE))); + when(parentDocMapper.typeFilter(Mockito.any())).thenReturn(new TypeFieldMapper.TypesQuery(new BytesRef(PARENT_TYPE))); + when(childDocMapper.typeFilter(Mockito.any())).thenReturn(new TypeFieldMapper.TypesQuery(new BytesRef(CHILD_TYPE))); return mapperService; } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index f2977fd7692..1648d8ede9f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -136,7 +136,7 @@ public class TermsAggregatorTests extends AggregatorTestCase { InternalAggregation mergedAggs = internalAgg.doReduce(aggs, ctx); assertTrue(mergedAggs instanceof DoubleTerms); long expected = numLongs + numDoubles; - List<Terms.Bucket> buckets = ((DoubleTerms) mergedAggs).getBuckets(); + List<? extends Terms.Bucket> buckets = ((DoubleTerms) mergedAggs).getBuckets(); assertEquals(4, buckets.size()); assertEquals("1.0", buckets.get(0).getKeyAsString()); assertEquals(expected, buckets.get(0).getDocCount()); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java index 2e2aa9657d8..b7739d6c816 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java @@ -334,7 +334,7 @@ public class AvgIT extends AbstractNumericTestCase { Terms terms = searchResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java index 7de7bb0f315..3903dd8b0bc 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -604,7 +604,7 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { Terms terms = searchResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java index e4444571a31..1a97cb49164 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsIT.java @@ -233,7 +233,7 @@ public class GeoBoundsIT extends AbstractGeoTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Bucket> buckets = terms.getBuckets(); + List<? extends Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(10)); for (int i = 0; i < 10; i++) { Bucket bucket = buckets.get(i); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 0aef20bca6d..5b56e6b7efb 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -515,7 +515,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { Terms terms = searchResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java index d3531da9250..56fb14402ad 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -506,7 +506,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase { Terms terms = searchResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java index 5c9e14c3453..03eb9a09237 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxIT.java @@ -336,7 +336,7 @@ public class MaxIT extends AbstractNumericTestCase { Terms terms = searchResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java index 2b517b64fa1..cba2ba9eb97 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/MinIT.java @@ -348,7 +348,7 @@ public class MinIT extends AbstractNumericTestCase { Terms terms = searchResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index 85db6166307..9231f093963 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -455,7 +455,7 @@ public class StatsIT extends AbstractNumericTestCase { Terms terms = searchResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index 1591e6df931..16d345c7b84 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -333,7 +333,7 @@ public class SumIT extends AbstractNumericTestCase { Terms terms = searchResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index 057731e275a..f1943747ceb 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -466,7 +466,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { Terms terms = searchResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java index d81ca004b97..2589e9977a6 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -451,7 +451,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { Terms terms = searchResponse.getAggregations().get("terms"); assertThat(terms, notNullValue()); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets, notNullValue()); assertThat(buckets.size(), equalTo(10)); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index aeef44d6272..e9b9ae20407 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -519,9 +519,9 @@ public class TopHitsIT extends ESIntegTestCase { SearchHits hits = topHits.getHits(); assertThat(hits.getTotalHits(), equalTo(10L)); assertThat(hits.getHits().length, equalTo(3)); - assertThat((Long) hits.getAt(0).getSortValues()[0], equalTo(higestSortValue)); - assertThat((Long) hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1)); - assertThat((Long) hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2)); + assertThat(hits.getAt(0).getSortValues()[0], equalTo(higestSortValue)); + assertThat(hits.getAt(1).getSortValues()[0], equalTo(higestSortValue - 1)); + assertThat(hits.getAt(2).getSortValues()[0], equalTo(higestSortValue - 2)); Max max = bucket.getAggregations().get("max_sort"); assertThat(max.getValue(), equalTo(((Long) higestSortValue).doubleValue())); higestSortValue -= 10; @@ -544,7 +544,7 @@ public class TopHitsIT extends ESIntegTestCase { assertThat(terms.getName(), equalTo("terms")); assertThat(terms.getBuckets().size(), equalTo(3)); - Iterator<Terms.Bucket> bucketIterator = terms.getBuckets().iterator(); + Iterator<? 
extends Terms.Bucket> bucketIterator = terms.getBuckets().iterator(); Terms.Bucket bucket = bucketIterator.next(); assertThat(key(bucket), equalTo("b")); TopHits topHits = bucket.getAggregations().get("hits"); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java index d62e93fa5e9..4f6ff0e32ed 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java @@ -140,7 +140,7 @@ public class AvgBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -182,7 +182,7 @@ public class AvgBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(interval)); double bucketSum = 0; @@ -223,7 +223,7 @@ public class AvgBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -277,7 +277,7 @@ public class AvgBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -322,7 +322,7 @@ public class AvgBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(0)); InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_bucket"); @@ -349,7 +349,7 @@ public class AvgBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); double aggTermsSum = 0; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index d4310e581c0..607124ecb15 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -211,7 +211,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -262,7 +262,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(interval)); double bucketSum = 0; @@ -312,7 +312,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -375,7 +375,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -429,7 +429,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(0)); ExtendedStatsBucket extendedStatsBucketValue = response.getAggregations().get("extended_stats_bucket"); @@ -487,7 +487,7 @@ public class ExtendedStatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); double aggTermsSum = 0; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java index aba941ebb4b..632f11f7ec7 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java @@ -149,7 +149,7 @@ public class MaxBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -196,7 +196,7 @@ public class MaxBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(interval)); List<String> maxKeys = new ArrayList<>(); @@ -242,7 +242,7 @@ public class MaxBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -348,7 +348,7 @@ public class MaxBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -397,7 +397,7 @@ public class MaxBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(0)); InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket"); @@ -425,7 +425,7 @@ public class MaxBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); List<String> maxTermsKeys = new ArrayList<>(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java index cbd6824b3a4..04fdd0c3133 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java @@ -146,7 +146,7 @@ public class MinBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -193,7 +193,7 @@ public class MinBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(interval)); List<String> minKeys = new ArrayList<>(); @@ -239,7 +239,7 @@ public class MinBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -298,7 +298,7 @@ public class MinBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -347,7 +347,7 @@ public class MinBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(0)); InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket"); @@ -375,7 +375,7 @@ public class MinBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); List<String> minTermsKeys = new ArrayList<>(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java index c582c76bd8c..e23e5441431 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -145,7 +145,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -187,7 +187,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(interval)); double[] values = new double[interval]; @@ -220,7 +220,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(interval)); double[] values = new double[interval]; @@ -261,7 +261,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -316,7 +316,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -361,7 +361,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(0)); PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket"); @@ -384,7 +384,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(0)); PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket"); @@ -479,7 +479,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); double[] values = new double[termsBuckets.size()]; @@ -539,7 +539,7 @@ public class PercentilesBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); double[] values = new double[termsBuckets.size()]; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java index c38dc99bdf9..231005f1b5b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java @@ -147,7 +147,7 @@ public class StatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -195,7 +195,7 @@ public class StatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(interval)); double bucketSum = 0; @@ -242,7 +242,7 @@ public class StatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -302,7 +302,7 @@ public class StatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -353,7 +353,7 @@ public class StatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(0)); StatsBucket statsBucketValue = response.getAggregations().get("stats_bucket"); @@ -380,7 +380,7 @@ public class StatsBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); double aggTermsSum = 0; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java index 09582430046..048dfac8648 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java @@ -137,7 +137,7 @@ public class SumBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -176,7 +176,7 @@ public class SumBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(interval)); double bucketSum = 0; @@ -214,7 +214,7 @@ public class SumBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -265,7 +265,7 @@ public class SumBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); for (int i = 0; i < interval; ++i) { @@ -307,7 +307,7 @@ public class SumBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> buckets = terms.getBuckets(); + List<? extends Terms.Bucket> buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(0)); InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_bucket"); @@ -334,7 +334,7 @@ public class SumBucketIT extends ESIntegTestCase { Terms terms = response.getAggregations().get("terms"); assertThat(terms, notNullValue()); assertThat(terms.getName(), equalTo("terms")); - List<Terms.Bucket> termsBuckets = terms.getBuckets(); + List<? 
extends Terms.Bucket> termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); double aggTermsSum = 0; diff --git a/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java b/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java index 1a661b509a2..5b33fb1c8de 100644 --- a/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java +++ b/core/src/test/java/org/elasticsearch/test/NoopDiscovery.java @@ -28,11 +28,6 @@ import org.elasticsearch.discovery.DiscoveryStats; public class NoopDiscovery implements Discovery { - @Override - public void setAllocationService(AllocationService allocationService) { - - } - @Override public void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener) { diff --git a/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 1ce73aee905..f7ab29c95c5 100644 --- a/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -385,7 +385,10 @@ public class RemoteClusterConnectionTests extends ESTestCase { failReference.set(x); responseLatch.countDown(); }); - connection.fetchSearchShards(request, new String[]{"test-index"}, shardsListener); + ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(new String[]{"test-index"}) + .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) + .routing(request.routing()); + connection.fetchSearchShards(searchShardsRequest, shardsListener); responseLatch.await(); assertNull(failReference.get()); assertNotNull(reference.get()); diff --git a/core/src/test/resources/indices/bwc/index-5.4.0.zip b/core/src/test/resources/indices/bwc/index-5.4.0.zip new file mode 100644 index 00000000000..e8473b3aa30 Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-5.4.0.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-5.4.0.zip b/core/src/test/resources/indices/bwc/repo-5.4.0.zip new file mode 100644 index 00000000000..53c565b4776 Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-5.4.0.zip differ diff --git a/docs/README.asciidoc b/docs/README.asciidoc index 9365f877eb5..90602d76ffa 100644 --- a/docs/README.asciidoc +++ b/docs/README.asciidoc @@ -4,24 +4,28 @@ Elasticsearch documentation build process. See: https://github.com/elastic/docs Snippets marked with `// CONSOLE` are automatically annotated with "VIEW IN -CONSOLE" in the documentation and are automatically tested by the command -`gradle :docs:check`. To test just the docs from a single page, use e.g. -`gradle :docs:check -Dtests.method=*rollover*`. +CONSOLE" and "COPY AS CURL" in the documentation and are automatically tested +by the command `gradle :docs:check`. To test just the docs from a single page, +use e.g. `gradle :docs:check -Dtests.method=*rollover*`. -By default `// CONSOLE` snippet runs as its own isolated -test. You can manipulate the test execution in the following ways: +By default each `// CONSOLE` snippet runs as its own isolated test. You can +manipulate the test execution in the following ways: * `// TEST`: Explicitly marks a snippet as a test. Snippets marked this way -are tests even if they don't have `// CONSOLE`. - * `// TEST[s/foo/bar/]`: Replace `foo` with `bar` in the test. This should be - used sparingly because it makes the test "lie". 
Sometimes, though, you can use - it to make the tests more clear. +are tests even if they don't have `// CONSOLE` but usually `// TEST` is used +for its modifiers: + * `// TEST[s/foo/bar/]`: Replace `foo` with `bar` in the generated test. This + should be used sparingly because it makes the snippet "lie". Sometimes, + though, you can use it to make the snippet more clear. Keep in + mind that if there are multiple substitutions then they are applied in + the order that they are defined. * `// TEST[catch:foo]`: Used to expect errors in the requests. Replace `foo` with `request` to expect a 400 error, for example. If the snippet contains multiple requests then only the last request will expect the error. * `// TEST[continued]`: Continue the test started in the last snippet. Between - tests the nodes are cleaned: indexes are removed, etc. This will prevent that. - This is really useful when you have text and snippets that work together to + tests the nodes are cleaned: indexes are removed, etc. This prevents that + from happening between snippets because the two snippets are a single test. + This is most useful when you have text and snippets that work together to tell the story of some use case because it merges the snippets (and thus the use case) into one big test. * `// TEST[skip:reason]`: Skip this test. Replace `reason` with the actual @@ -38,8 +42,11 @@ are tests even if they don't have `// CONSOLE`. * `// TESTRESPONSE`: Matches this snippet against the body of the response of the last test. If the response is JSON then order is ignored. If you add `// TEST[continued]` to the snippet after `// TESTRESPONSE` it will continue - in the same test, allowing you to interleve requests with responses to check. - * `// TESTRESPONSE[s/foo/bar/]`: Substitutions. See `// TEST[s/foo/bar]`. + in the same test, allowing you to interleave requests with responses to check. + * `// TESTRESPONSE[s/foo/bar/]`: Substitutions. See `// TEST[s/foo/bar]` for + how it works. These are much more common than `// TEST[s/foo/bar]` because + they are useful for eliding portions of the response that are not pertinent + to the documentation. * `// TESTRESPONSE[_cat]`: Add substitutions for testing `_cat` responses. Use this after all other substitutions so it doesn't make other substitutions difficult.
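As a rough sketch of how these markers combine (the index name `my-index` is hypothetical and not taken from the diff above): the first snippet runs as its own isolated test, while the second reuses its state via `// TEST[continued]`.

[source,js]
--------------------------------------------------
PUT /my-index
--------------------------------------------------
// CONSOLE

[source,js]
--------------------------------------------------
GET /my-index/_settings
--------------------------------------------------
// CONSOLE
// TEST[continued]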
diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 12e606ba78a..bbd5f3bce07 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,9 +1,9 @@ :version: 6.0.0-alpha1 :major-version: 6.x -:lucene_version: 6.4.0 -:lucene_version_path: 6_4_0 +:lucene_version: 7.0.0-SNAPSHOT +:lucene_version_path: 7_0_0 :branch: master -:jdk: 1.8.0_73 +:jdk: 1.8.0_131 ////////// release-state can be: released | prerelease | unreleased diff --git a/docs/build.gradle b/docs/build.gradle index 7effa7401b0..ef53bd135ab 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -38,9 +38,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/aggregations/metrics/percentile-rank-aggregation.asciidoc', 'reference/aggregations/metrics/scripted-metric-aggregation.asciidoc', 'reference/aggregations/metrics/tophits-aggregation.asciidoc', - 'reference/cat/snapshots.asciidoc', - 'reference/cat/templates.asciidoc', - 'reference/cat/thread_pool.asciidoc', 'reference/cluster/allocation-explain.asciidoc', 'reference/cluster/nodes-info.asciidoc', 'reference/cluster/nodes-stats.asciidoc', @@ -57,12 +54,8 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/indices/recovery.asciidoc', 'reference/indices/segments.asciidoc', 'reference/indices/shard-stores.asciidoc', - 'reference/ingest/ingest-node.asciidoc', - 'reference/mapping/dynamic/templates.asciidoc', - 'reference/modules/cross-cluster-search.asciidoc', // this is hard to test since we need 2 clusters -- maybe we can trick it into referencing itself... 'reference/search/field-stats.asciidoc', 'reference/search/profile.asciidoc', - 'reference/search/request/inner-hits.asciidoc', ] integTestCluster { diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc index 64b87801acb..20c62a5861a 100644 --- a/docs/plugins/repository-hdfs.asciidoc +++ b/docs/plugins/repository-hdfs.asciidoc @@ -68,3 +68,113 @@ The following settings are supported: Override the chunk size. (Disabled by default) +`security.principal`:: + + Kerberos principal to use when connecting to a secured HDFS cluster. + If you are using a service principal for your elasticsearch node, you may + use the `_HOST` pattern in the principal name and the plugin will replace + the pattern with the hostname of the node at runtime (see + link:repository-hdfs-security-runtime[Creating the Secure Repository]). + +[[repository-hdfs-security]] +==== Hadoop Security + +The HDFS Repository Plugin integrates seamlessly with Hadoop's authentication model. The following authentication +methods are supported by the plugin: + +[horizontal] +`simple`:: + + Also means "no security" and is enabled by default. Uses information from underlying operating system account + running elasticsearch to inform Hadoop of the name of the current user. Hadoop makes no attempts to verify this + information. + +`kerberos`:: + + Authenticates to Hadoop through the usage of a Kerberos principal and keytab. Interfacing with HDFS clusters + secured with Kerberos requires a few additional steps to enable (See <<repository-hdfs-security-keytabs>> and + <<repository-hdfs-security-runtime>> for more info) + +[[repository-hdfs-security-keytabs]] +[float] +===== Principals and Keytabs +Before attempting to connect to a secured HDFS cluster, provision the Kerberos principals and keytabs that the +Elasticsearch nodes will use for authenticating to Kerberos. 
For maximum security and to avoid tripping up the Kerberos +replay protection, you should create a service principal per node, following the pattern of +`elasticsearch/hostname@REALM`. + +WARNING: In some cases, if the same principal is authenticating from multiple clients at once, services may reject +authentication for those principals under the assumption that they could be replay attacks. If you are running the +plugin in production with multiple nodes you should be using a unique service principal for each node. + +On each Elasticsearch node, place the appropriate keytab file in the node's configuration location under the +`repository-hdfs` directory using the name `krb5.keytab`: + +[source, bash] +---- +$> cd elasticsearch/config +$> ls +elasticsearch.yml jvm.options log4j2.properties repository-hdfs/ scripts/ +$> cd repository-hdfs +$> ls +krb5.keytab +---- +// TEST[skip:this is for demonstration purposes only] + +NOTE: Make sure you have the correct keytabs! If you are using a service principal per node (like +`elasticsearch/hostname@REALM`) then each node will need its own unique keytab file for the principal assigned to that +host! + +// Setup at runtime (principal name) +[[repository-hdfs-security-runtime]] +[float] +===== Creating the Secure Repository +Once your keytab files are in place and your cluster is started, creating a secured HDFS repository is simple. Just +add the name of the principal that you will be authenticating as in the repository settings under the +`security.principal` option: + +[source,js] +---- +PUT _snapshot/my_hdfs_repository +{ + "type": "hdfs", + "settings": { + "uri": "hdfs://namenode:8020/", + "path": "/user/elasticsearch/repositories/my_hdfs_repository", + "security.principal": "elasticsearch@REALM" + } +} +---- +// CONSOLE +// TEST[skip:we don't have hdfs set up while testing this] + +If you are using different service principals for each node, you can use the `_HOST` pattern in your principal +name. Elasticsearch will automatically replace the pattern with the hostname of the node at runtime: + +[source,js] +---- +PUT _snapshot/my_hdfs_repository +{ + "type": "hdfs", + "settings": { + "uri": "hdfs://namenode:8020/", + "path": "/user/elasticsearch/repositories/my_hdfs_repository", + "security.principal": "elasticsearch/_HOST@REALM" + } +} +---- +// CONSOLE +// TEST[skip:we don't have hdfs set up while testing this] + +[[repository-hdfs-security-authorization]] +[float] +===== Authorization +Once Elasticsearch is connected and authenticated to HDFS, HDFS will infer a username to use for +authorizing file access for the client. By default, it picks this username from the primary part of +the Kerberos principal used to authenticate to the service. For example, in the case of a principal +like `elasticsearch@REALM` or `elasticsearch/hostname@REALM` then the username that HDFS +extracts for file access checks will be `elasticsearch`. + +NOTE: The repository plugin makes no assumptions of what Elasticsearch's principal name is. The main fragment of the +Kerberos principal is not required to be `elasticsearch`. If you have a principal or service name that works better +for you or your organization then feel free to use it instead!
\ No newline at end of file diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 58ca5dab2d7..a0fe757ab40 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -168,6 +168,9 @@ The following settings are supported: Specifies the path within bucket to repository data. Defaults to value of `repositories.s3.base_path` or to root directory if not set. + Previously, the base_path could take a leading `/` (forward slash). + However, this has been deprecated and setting the base_path now should + omit the leading `/`. `access_key`:: diff --git a/docs/reference/cat/snapshots.asciidoc b/docs/reference/cat/snapshots.asciidoc index 3d34cd51e6d..5677a0f2a7c 100644 --- a/docs/reference/cat/snapshots.asciidoc +++ b/docs/reference/cat/snapshots.asciidoc @@ -5,15 +5,30 @@ The `snapshots` command shows all snapshots that belong to a specific repository To find a list of available repositories to query, the command `/_cat/repositories` can be used. Querying the snapshots of a repository named `repo1` then looks as follows. -[source,sh] +[source,js] +-------------------------------------------------- +GET /_cat/snapshots/repo1?v&s=id +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT \/_snapshot\/repo1\/snap1?wait_for_completion=true\n/] +// TEST[s/^/PUT \/_snapshot\/repo1\/snap2?wait_for_completion=true\n/] +// TEST[s/^/PUT \/_snapshot\/repo1\n{"type": "fs", "settings": {"location": "repo\/1"}}\n/] + +Which looks like: + +[source,txt] -------------------------------------------------- -% curl 'localhost:9200/_cat/snapshots/repo1?v' id status start_epoch start_time end_epoch end_time duration indices successful_shards failed_shards total_shards snap1 FAILED 1445616705 18:11:45 1445616978 18:16:18 4.6m 1 4 1 5 snap2 SUCCESS 1445634298 23:04:58 1445634672 23:11:12 6.2m 2 10 0 10 -------------------------------------------------- +// TESTRESPONSE[s/FAILED/SUCCESS/ s/14456\d+/\\d+/ s/\d+(\.\d+)?(m|s|ms)/\\d+(\\.\\d+)?(m|s|ms)/] +// TESTRESPONSE[s/\d+:\d+:\d+/\\d+:\\d+:\\d+/] +// TESTRESPONSE[s/1 4 1 5/\\d+ \\d+ \\d+ \\d+/] +// TESTRESPONSE[s/2 10 0 10/\\d+ \\d+ \\d+ \\d+/] +// TESTRESPONSE[_cat] Each snapshot contains information about when it was started and stopped. Start and stop timestamps are available in two formats. The `HH:MM:SS` output is simply for quick human consumption. -The epoch time retains more information, including date, and is machine sortable if the snapshot process spans days. \ No newline at end of file +The epoch time retains more information, including date, and is machine sortable if the snapshot process spans days. diff --git a/docs/reference/cat/templates.asciidoc b/docs/reference/cat/templates.asciidoc index 51ab34d0546..bc221d13552 100644 --- a/docs/reference/cat/templates.asciidoc +++ b/docs/reference/cat/templates.asciidoc @@ -3,14 +3,25 @@ The `templates` command provides information about existing templates. 
-[source, sh] +[source,js] -------------------------------------------------- -% curl 'localhost:9200/_cat/templates?v=true' -name template order version -template0 te* 0 -template1 tea* 1 -template2 teak* 2 7 +GET /_cat/templates?v&s=name -------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT _template\/template0\n{"index_patterns": "te*", "order": 0}\n/] +// TEST[s/^/PUT _template\/template1\n{"index_patterns": "tea*", "order": 1}\n/] +// TEST[s/^/PUT _template\/template2\n{"index_patterns": "teak*", "order": 2, "version": 7}\n/] + +which looks like + +[source,txt] +-------------------------------------------------- +name index_patterns order version +template0 [te*] 0 +template1 [tea*] 1 +template2 [teak*] 2 7 +-------------------------------------------------- +// TESTRESPONSE[s/\*/\\*/ s/\[/\\[/ s/\]/\\]/ _cat] The output shows that there are three existing templates, with template2 having a version value. diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index 7da1aa5ce0f..721d85e46a0 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -4,35 +4,43 @@ The `thread_pool` command shows cluster wide thread pool statistics per node. By default the active, queue and rejected statistics are returned for all thread pools. -[source,sh] +[source,js] -------------------------------------------------- -% curl 192.168.56.10:9200/_cat/thread_pool -0EWUhXe bulk 0 0 0 -0EWUhXe fetch_shard_started 0 0 0 -0EWUhXe fetch_shard_store 0 0 0 -0EWUhXe flush 0 0 0 -0EWUhXe force_merge 0 0 0 -0EWUhXe generic 0 0 0 -0EWUhXe get 0 0 0 -0EWUhXe index 0 0 0 -0EWUhXe listener 0 0 0 -0EWUhXe management 1 0 0 -0EWUhXe refresh 0 0 0 -0EWUhXe search 0 0 0 -0EWUhXe snapshot 0 0 0 -0EWUhXe warmer 0 0 0 +GET /_cat/thread_pool -------------------------------------------------- +// CONSOLE + +Which looks like: + +[source,txt] +-------------------------------------------------- +node-0 bulk 0 0 0 +node-0 fetch_shard_started 0 0 0 +node-0 fetch_shard_store 0 0 0 +node-0 flush 0 0 0 +node-0 force_merge 0 0 0 +node-0 generic 0 0 0 +node-0 get 0 0 0 +node-0 index 0 0 0 +node-0 listener 0 0 0 +node-0 management 1 0 0 +node-0 refresh 0 0 0 +node-0 search 0 0 0 +node-0 snapshot 0 0 0 +node-0 warmer 0 0 0 +-------------------------------------------------- +// TESTRESPONSE[s/\d+/\\d+/ _cat] The first column is the node name -[source,sh] +[source,txt] -------------------------------------------------- node_name -0EWUhXe +node-0 -------------------------------------------------- The second column is the thread pool name -[source,sh] +[source,txt] -------------------------------------------------- name bulk @@ -54,7 +62,7 @@ warmer The next three columns show the active, queue, and rejected statistics for each thread pool -[source,sh] +[source,txt] -------------------------------------------------- active queue rejected 0 0 0 @@ -76,12 +84,20 @@ active queue rejected The cat thread pool API accepts a `thread_pool_patterns` URL parameter for specifying a comma-separated list of regular expressions to match thread pool names. 
-[source,sh] +[source,js] +-------------------------------------------------- +GET /_cat/thread_pool/generic?v&h=id,name,active,rejected,completed +-------------------------------------------------- +// CONSOLE + +which looks like: + +[source,js] -------------------------------------------------- -% curl 'localhost:9200/_cat/thread_pool/generic?v&h=id,name,active,rejected,completed' id name active rejected completed 0EWUhXeBQtaVGlexUeVwMg generic 0 0 70 -------------------------------------------------- +// TESTRESPONSE[s/0EWUhXeBQtaVGlexUeVwMg/[\\w-]+/ s/\d+/\\d+/ _cat] Here the host columns and the active, rejected and completed suggest thread pool statistics are displayed. diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 2a4d7852a2f..cb97bcf6d9f 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -42,9 +42,7 @@ include::testing.asciidoc[] include::glossary.asciidoc[] -////// - include::release-notes.asciidoc[] -////// +include::release-notes.asciidoc[] include::painless-api-reference.asciidoc[] diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index f195ee1f2fd..723d7205cfc 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -12,6 +12,7 @@ and a list of `processors`: "processors" : [ ... ] } -------------------------------------------------- +// NOTCONSOLE The `description` is a special field to store a helpful description of what the pipeline does. @@ -126,6 +127,7 @@ using `filter_path` to limit the response to just the `version`: -------------------------------------------------- GET /_ingest/pipeline/my-pipeline-id?filter_path=*.version -------------------------------------------------- +// CONSOLE // TEST[continued] This should give a small response that makes it both easy and inexpensive to parse: @@ -209,6 +211,7 @@ POST _ingest/pipeline/_simulate ] } -------------------------------------------------- +// NOTCONSOLE Here is the structure of a simulate request against an existing pipeline: @@ -223,7 +226,7 @@ POST _ingest/pipeline/my-pipeline-id/_simulate ] } -------------------------------------------------- - +// NOTCONSOLE Here is an example of a simulate request with a pipeline defined in the request and its response: @@ -275,42 +278,36 @@ Response: { "doc": { "_id": "id", - "_ttl": null, - "_parent": null, "_index": "index", - "_routing": null, "_type": "type", - "_timestamp": null, "_source": { "field2": "_value", "foo": "bar" }, "_ingest": { - "timestamp": "2016-01-04T23:53:27.186+0000" + "timestamp": "2017-05-04T22:30:03.187Z" } } }, { "doc": { "_id": "id", - "_ttl": null, - "_parent": null, "_index": "index", - "_routing": null, "_type": "type", - "_timestamp": null, "_source": { "field2": "_value", "foo": "rab" }, "_ingest": { - "timestamp": "2016-01-04T23:53:27.186+0000" + "timestamp": "2017-05-04T22:30:03.188Z" } } } ] } -------------------------------------------------- +// TESTRESPONSE[s/"2017-05-04T22:30:03.187Z"/$body.docs.0.doc._ingest.timestamp/] +// TESTRESPONSE[s/"2017-05-04T22:30:03.188Z"/$body.docs.1.doc._ingest.timestamp/] [[ingest-verbose-param]] ==== Viewing Verbose Results @@ -374,41 +371,31 @@ Response: { "processor_results": [ { - "tag": "processor[set]-0", "doc": { "_id": "id", - "_ttl": null, - "_parent": null, "_index": "index", - "_routing": null, "_type": "type", - "_timestamp": null, "_source": { "field2": "_value2", "foo": "bar" }, "_ingest": { - "timestamp": 
"2016-01-05T00:02:51.383+0000" + "timestamp": "2017-05-04T22:46:09.674Z" } } }, { - "tag": "processor[set]-1", "doc": { "_id": "id", - "_ttl": null, - "_parent": null, "_index": "index", - "_routing": null, "_type": "type", - "_timestamp": null, "_source": { "field3": "_value3", "field2": "_value2", "foo": "bar" }, "_ingest": { - "timestamp": "2016-01-05T00:02:51.383+0000" + "timestamp": "2017-05-04T22:46:09.675Z" } } } @@ -417,41 +404,31 @@ Response: { "processor_results": [ { - "tag": "processor[set]-0", "doc": { "_id": "id", - "_ttl": null, - "_parent": null, "_index": "index", - "_routing": null, "_type": "type", - "_timestamp": null, "_source": { "field2": "_value2", "foo": "rab" }, "_ingest": { - "timestamp": "2016-01-05T00:02:51.384+0000" + "timestamp": "2017-05-04T22:46:09.676Z" } } }, { - "tag": "processor[set]-1", "doc": { "_id": "id", - "_ttl": null, - "_parent": null, "_index": "index", - "_routing": null, "_type": "type", - "_timestamp": null, "_source": { "field3": "_value3", "field2": "_value2", "foo": "rab" }, "_ingest": { - "timestamp": "2016-01-05T00:02:51.384+0000" + "timestamp": "2017-05-04T22:46:09.677Z" } } } @@ -460,6 +437,10 @@ Response: ] } -------------------------------------------------- +// TESTRESPONSE[s/"2017-05-04T22:46:09.674Z"/$body.docs.0.processor_results.0.doc._ingest.timestamp/] +// TESTRESPONSE[s/"2017-05-04T22:46:09.675Z"/$body.docs.0.processor_results.1.doc._ingest.timestamp/] +// TESTRESPONSE[s/"2017-05-04T22:46:09.676Z"/$body.docs.1.processor_results.0.doc._ingest.timestamp/] +// TESTRESPONSE[s/"2017-05-04T22:46:09.677Z"/$body.docs.1.processor_results.1.doc._ingest.timestamp/] [[accessing-data-in-pipelines]] == Accessing Data in Pipelines @@ -482,6 +463,7 @@ their name. For example: } } -------------------------------------------------- +// NOTCONSOLE On top of this, fields from the source are always accessible via the `_source` prefix: @@ -494,6 +476,7 @@ On top of this, fields from the source are always accessible via the `_source` p } } -------------------------------------------------- +// NOTCONSOLE [float] [[accessing-metadata-fields]] @@ -513,6 +496,7 @@ The following example sets the `_id` metadata field of a document to `1`: } } -------------------------------------------------- +// NOTCONSOLE The following metadata fields are accessible by a processor: `_index`, `_type`, `_id`, `_routing`, `_parent`. @@ -538,6 +522,7 @@ The following example adds a field with the name `received`. The value is the in } } -------------------------------------------------- +// NOTCONSOLE Unlike Elasticsearch metadata fields, the ingest metadata field name `_ingest` can be used as a valid field name in the source of a document. Use `_source._ingest` to refer to the field in the source document. Otherwise, `_ingest` @@ -562,6 +547,7 @@ the values of `field_a` and `field_b`. } } -------------------------------------------------- +// NOTCONSOLE The following example uses the value of the `geoip.country_iso_code` field in the source to set the index that the document will be indexed into: @@ -575,6 +561,7 @@ to set the index that the document will be indexed into: } } -------------------------------------------------- +// NOTCONSOLE [[handling-failure-in-pipelines]] == Handling Failures in Pipelines @@ -620,6 +607,7 @@ Elasticsearch. ] } -------------------------------------------------- +// NOTCONSOLE The following example defines an `on_failure` block on a whole pipeline to change the index to which failed documents get sent. 
@@ -639,6 +627,7 @@ the index to which failed documents get sent. ] } -------------------------------------------------- +// NOTCONSOLE Alternatively instead of defining behaviour in case of processor failure, it is also possible to ignore a failure and continue with the next processor by specifying the `ignore_failure` setting. @@ -661,6 +650,7 @@ continues to execute, which in this case means that the pipeline does nothing. ] } -------------------------------------------------- +// NOTCONSOLE The `ignore_failure` can be set on any processor and defaults to `false`. @@ -699,6 +689,7 @@ metadata field to provide the error message. ] } -------------------------------------------------- +// NOTCONSOLE [[ingest-processors]] == Processors @@ -713,6 +704,7 @@ All processors are defined in the following way within a pipeline definition: } } -------------------------------------------------- +// NOTCONSOLE Each processor defines its own configuration parameters, but all processors have the ability to declare `tag` and `on_failure` fields. These fields are optional. @@ -765,6 +757,7 @@ Accepts a single value or an array of values. } } -------------------------------------------------- +// NOTCONSOLE [[convert-processor]] === Convert Processor @@ -802,6 +795,7 @@ such a case, `target_field` will still be updated with the unconverted field val } } -------------------------------------------------- +// NOTCONSOLE [[date-processor]] === Date Processor @@ -842,6 +836,7 @@ Here is an example that adds the parsed date to the `timestamp` field based on t ] } -------------------------------------------------- +// NOTCONSOLE [[date-index-name-processor]] === Date Index Name Processor @@ -1011,6 +1006,7 @@ to the requester. } } -------------------------------------------------- +// NOTCONSOLE [[foreach-processor]] === Foreach Processor @@ -1059,6 +1055,7 @@ Assume the following document: "values" : ["foo", "bar", "baz"] } -------------------------------------------------- +// NOTCONSOLE When this `foreach` processor operates on this sample document: @@ -1075,6 +1072,7 @@ When this `foreach` processor operates on this sample document: } } -------------------------------------------------- +// NOTCONSOLE Then the document will look like this after preprocessing: @@ -1084,6 +1082,7 @@ Then the document will look like this after preprocessing: "values" : ["FOO", "BAR", "BAZ"] } -------------------------------------------------- +// NOTCONSOLE Let's take a look at another example: @@ -1102,6 +1101,7 @@ Let's take a look at another example: ] } -------------------------------------------------- +// NOTCONSOLE In this case, the `id` field needs to be removed, so the following `foreach` processor is used: @@ -1119,6 +1119,7 @@ so the following `foreach` processor is used: } } -------------------------------------------------- +// NOTCONSOLE After preprocessing the result is: @@ -1135,6 +1136,7 @@ After preprocessing the result is: ] } -------------------------------------------------- +// NOTCONSOLE The wrapped processor can have a `on_failure` definition. For example, the `id` field may not exist on all person objects. @@ -1162,6 +1164,7 @@ block to send the document to the 'failure_index' index for later inspection: } } -------------------------------------------------- +// NOTCONSOLE In this example, if the `remove` processor does fail, then the array elements that have been processed thus far will @@ -1210,7 +1213,7 @@ The `TYPE` is the type you wish to cast your named field. 
`int` and `float` are For example, you might want to match the following text: -[source,js] +[source,txt] -------------------------------------------------- 3.44 55.3.244.1 -------------------------------------------------- @@ -1218,7 +1221,7 @@ For example, you might want to match the following text: You may know that the message in the example is a number followed by an IP address. You can match this text by using the following Grok expression. -[source,js] +[source,txt] -------------------------------------------------- %{NUMBER:duration} %{IP:client} -------------------------------------------------- @@ -1247,10 +1250,11 @@ a document. "message": "55.3.244.1 GET /index.html 15824 0.043" } -------------------------------------------------- +// NOTCONSOLE The pattern for this could be: -[source,js] +[source,txt] -------------------------------------------------- %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration} -------------------------------------------------- @@ -1271,6 +1275,7 @@ Here is an example pipeline for processing the above document by using Grok: ] } -------------------------------------------------- +// NOTCONSOLE This pipeline will insert these named captures as new fields within the document, like so: @@ -1285,6 +1290,7 @@ This pipeline will insert these named captures as new fields within the document "duration": "0.043" } -------------------------------------------------- +// NOTCONSOLE [[custom-patterns]] ==== Custom Patterns and Pattern Files @@ -1313,6 +1319,7 @@ Here is an example of a pipeline specifying custom pattern definitions: ] } -------------------------------------------------- +// NOTCONSOLE [[trace-match]] ==== Providing Multiple Match Patterns @@ -1472,6 +1479,7 @@ If the field is not a string, the processor will throw an exception. } } -------------------------------------------------- +// NOTCONSOLE [[join-processor]] === Join Processor @@ -1496,6 +1504,7 @@ Throws an error when the field is not an array. 
} } -------------------------------------------------- +// NOTCONSOLE [[json-processor]] === JSON Processor @@ -1522,6 +1531,7 @@ Suppose you provide this configuration of the `json` processor: } } -------------------------------------------------- +// NOTCONSOLE If the following document is processed: @@ -1531,6 +1541,7 @@ If the following document is processed: "string_source": "{\"foo\": 2000}" } -------------------------------------------------- +// NOTCONSOLE after the `json` processor operates on it, it will look like: @@ -1543,6 +1554,7 @@ after the `json` processor operates on it, it will look like: } } -------------------------------------------------- +// NOTCONSOLE If the following configuration is provided, omitting the optional `target_field` setting: [source,js] @@ -1553,6 +1565,7 @@ If the following configuration is provided, omitting the optional `target_field` } } -------------------------------------------------- +// NOTCONSOLE then after the `json` processor operates on this document: @@ -1562,6 +1575,7 @@ then after the `json` processor operates on this document: "source_and_target": "{\"foo\": 2000}" } -------------------------------------------------- +// NOTCONSOLE it will look like: @@ -1573,8 +1587,9 @@ it will look like: } } -------------------------------------------------- +// NOTCONSOLE -This illustrates that, unless it is explicitly named in the processor configuration, the `target_field` +This illustrates that, unless it is explicitly named in the processor configuration, the `target_field` is the same field provided in the required `field` configuration. [[kv-processor]] @@ -1594,6 +1609,7 @@ For example, if you have a log message which contains `ip=1.2.3.4 error=REFUSED` } } -------------------------------------------------- +// NOTCONSOLE [[kv-options]] .Kv Options @@ -1630,6 +1646,7 @@ Converts a string to its lowercase equivalent. } } -------------------------------------------------- +// NOTCONSOLE [[remove-processor]] === Remove Processor @@ -1651,6 +1668,7 @@ Removes an existing field. If the field doesn't exist, an exception will be thro } } -------------------------------------------------- +// NOTCONSOLE [[rename-processor]] === Rename Processor @@ -1675,6 +1693,7 @@ Renames an existing field. If the field doesn't exist or the new name is already } } -------------------------------------------------- +// NOTCONSOLE [[script-processor]] === Script Processor @@ -1718,6 +1737,7 @@ numeric fields `field_a` and `field_b` multiplied by the parameter param_c: } } -------------------------------------------------- +// NOTCONSOLE [[set-processor]] @@ -1744,6 +1764,7 @@ its value will be replaced with the provided one. } } -------------------------------------------------- +// NOTCONSOLE [[split-processor]] === Split Processor @@ -1768,6 +1789,7 @@ Splits a field into an array using a separator character. Only works on string f } } -------------------------------------------------- +// NOTCONSOLE <1> Treat all consecutive whitespace characters as a single separator [[sort-processor]] @@ -1794,6 +1816,7 @@ Throws an error when the field is not an array. } } -------------------------------------------------- +// NOTCONSOLE [[trim-processor]] === Trim Processor @@ -1818,6 +1841,7 @@ NOTE: This only works on leading and trailing whitespace. } } -------------------------------------------------- +// NOTCONSOLE [[uppercase-processor]] === Uppercase Processor @@ -1840,6 +1864,7 @@ Converts a string to its uppercase equivalent. 
} } -------------------------------------------------- +// NOTCONSOLE [[dot-expand-processor]] === Dot Expander Processor @@ -1865,6 +1890,7 @@ Otherwise these <<accessing-data-in-pipelines,fields>> can't be accessed by any } } -------------------------------------------------- +// NOTCONSOLE For example the dot expand processor would turn this document: @@ -1874,6 +1900,7 @@ For example the dot expand processor would turn this document: "foo.bar" : "value" } -------------------------------------------------- +// NOTCONSOLE into: @@ -1885,6 +1912,7 @@ into: } } -------------------------------------------------- +// NOTCONSOLE If there is already a `bar` field nested under `foo` then this processor merges the the `foo.bar` field into it. If the field is @@ -1901,6 +1929,7 @@ For example, the following document: } } -------------------------------------------------- +// NOTCONSOLE is transformed by the `dot_expander` processor into: @@ -1912,6 +1941,7 @@ is transformed by the `dot_expander` processor into: } } -------------------------------------------------- +// NOTCONSOLE If any field outside of the leaf field conflicts with a pre-existing field of the same name, then that field needs to be renamed first. @@ -1925,6 +1955,7 @@ Consider the following document: "foo.bar": "value2" } -------------------------------------------------- +// NOTCONSOLE Then the the `foo` needs to be renamed first before the `dot_expander` processor is applied. So in order for the `foo.bar` field to properly @@ -1949,6 +1980,7 @@ pipeline should be used: ] } -------------------------------------------------- +// NOTCONSOLE The reason for this is that Ingest doesn't know how to automatically cast a scalar field to an object field. diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index eb9eb1a60d9..90fc5de5c45 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -32,6 +32,7 @@ Dynamic templates are specified as an array of named objects: ... ] -------------------------------------------------- +// NOTCONSOLE <1> The template name can be any string value. <2> The match conditions can include any of : `match_mapping_type`, `match`, `match_pattern`, `unmatch`, `path_match`, `path_unmatch`. <3> The mapping that the matched field should use. @@ -94,7 +95,6 @@ PUT my_index/my_type/1 "my_integer": 5, <1> "my_string": "Some string" <2> } - -------------------------------------------------- // CONSOLE <1> The `my_integer` field is mapped as an `integer`. @@ -156,6 +156,7 @@ instead of simple wildcards, for instance: "match_pattern": "regex", "match": "^profit_\d+$" -------------------------------------------------- +// NOTCONSOLE [[path-match-unmatch]] ==== `path_match` and `path_unmatch` @@ -282,6 +283,7 @@ PUT my_index } } -------------------------------------------------- +// CONSOLE ===== `text`-only mappings for strings @@ -311,6 +313,7 @@ PUT my_index } } -------------------------------------------------- +// CONSOLE ===== Disabled norms @@ -345,6 +348,7 @@ PUT my_index } } -------------------------------------------------- +// CONSOLE The sub `keyword` field appears in this template to be consistent with the default rules of dynamic mappings. 
Of course if you do not need them because @@ -388,6 +392,7 @@ PUT my_index } } -------------------------------------------------- +// CONSOLE <1> Like the default dynamic mapping rules, doubles are mapped as floats, which are usually accurate enough, yet require half the disk space. diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index d4bf76b0cef..4ac7ec03f61 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -9,6 +9,8 @@ The following range types are supported: `long_range`:: A range of signed 64-bit integers with a minimum value of +-2^63^+ and maximum of +2^63^-1+. `double_range`:: A range of double-precision 64-bit IEEE 754 floating point values. `date_range`:: A range of date values represented as unsigned 64-bit integer milliseconds elapsed since system epoch. +`ip_range` :: A range of ip values supporting either https://en.wikipedia.org/wiki/IPv4[IPv4] or + https://en.wikipedia.org/wiki/IPv6[IPv6] (or mixed) addresses. Below is an example of configuring a mapping with various range fields followed by an example that indexes several range types. diff --git a/docs/reference/migration/migrate_6_0/docs.asciidoc b/docs/reference/migration/migrate_6_0/docs.asciidoc index b7a08ea8930..9b4ad82a32d 100644 --- a/docs/reference/migration/migrate_6_0/docs.asciidoc +++ b/docs/reference/migration/migrate_6_0/docs.asciidoc @@ -1,11 +1,11 @@ [[breaking_60_docs_changes]] === Document API changes -==== version type 'force' removed +==== version type `force` removed Document modification operations may no longer specify the `version_type` of `force` to override any previous version checks. -==== <<upserts>> no longer support versions +==== <<upserts,Upserts>> no longer support versions Adding a `version` to an upsert request is no longer supported. diff --git a/docs/reference/migration/migrate_6_0/indices.asciidoc b/docs/reference/migration/migrate_6_0/indices.asciidoc index a1d1ffd578d..0a05fd55139 100644 --- a/docs/reference/migration/migrate_6_0/indices.asciidoc +++ b/docs/reference/migration/migrate_6_0/indices.asciidoc @@ -1,7 +1,7 @@ [[breaking_60_indices_changes]] -=== Templates changes +=== Indices changes -==== `template` is now `index_patterns` +==== Index templates use `index_patterns` instead of `template` Previously templates expressed the indices that they should match using a glob style pattern in the `template` field. They should now use the `index_patterns` @@ -29,7 +29,7 @@ PUT _template/template_2 // CONSOLE -=== Shadow Replicas have been removed +==== Shadow Replicas have been removed Shadow replicas don't see enough usage, and have been removed. 
This includes the following settings: @@ -38,7 +38,7 @@ following settings: - `index.shadow_replicas` - `node.add_lock_id_to_custom_path` -=== Open/Close index API allows wildcard expressions that match no indices by default +==== Open/Close index API allows wildcard expressions that match no indices by default The default value of the `allow_no_indices` option for the Open/Close index API has been changed from `false` to `true` so it is aligned with the behaviour of the diff --git a/docs/reference/migration/migrate_6_0/mappings.asciidoc b/docs/reference/migration/migrate_6_0/mappings.asciidoc index 754f4fe7266..e85b31d97ff 100644 --- a/docs/reference/migration/migrate_6_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_6_0/mappings.asciidoc @@ -4,7 +4,7 @@ ==== Coercion of boolean fields Previously, Elasticsearch recognized the strings `true`, `false`, `on`, `off`, `yes`, `no`, `0`, `1` as booleans. Elasticsearch 6.0 -recognizes only `true` and `false` as boolean and will throw an error otherwise. For backwards compatibility purposes, during the 6.x +recognizes only the strings `true` and `false` as booleans and will throw an error otherwise. For backwards compatibility purposes, during the 6.x series the previous coercion rules will continue to work on pre-6.0 indices. This means that you do not need to change affected existing mappings immediately. However, it is not possible to create new indices from existing index templates that violate the strict `boolean` coercion rules. @@ -20,7 +20,7 @@ created with Elasticsearch version 6.0 or later. ==== The `include_in_all` mapping parameter is now disallowed -Since the `_all` field is now disabled by default and cannot be configured for +Since the ++_all++ field is now disabled by default and cannot be configured for indices created with Elasticsearch 6.0 or later, the `include_in_all` setting is now disallowed for these indices' mappings. diff --git a/docs/reference/migration/migrate_6_0/rest.asciidoc b/docs/reference/migration/migrate_6_0/rest.asciidoc index 5ef09a15bff..cfd8a9511a2 100644 --- a/docs/reference/migration/migrate_6_0/rest.asciidoc +++ b/docs/reference/migration/migrate_6_0/rest.asciidoc @@ -48,7 +48,7 @@ Refresh requests that are broadcast to multiple shards that can have one or more shards fail during the request now return a 500 response instead of a 200 response in the event there is at least one failure. -=== Delete by Query API requires an explicit query +==== Delete by Query API requires an explicit query In previous versions of Elasticsearch, delete by query requests without an explicit query were accepted, match_all was used as the default query and all documents were deleted diff --git a/docs/reference/migration/migrate_6_0/scripting.asciidoc b/docs/reference/migration/migrate_6_0/scripting.asciidoc index 67402009223..075678b3ab3 100644 --- a/docs/reference/migration/migrate_6_0/scripting.asciidoc +++ b/docs/reference/migration/migrate_6_0/scripting.asciidoc @@ -1,10 +1,10 @@ [[breaking_60_scripting_changes]] === Scripting changes -==== Groovy language removed +==== Groovy, JavaScript, and Python languages removed -The groovy scripting language was deprecated in elasticsearch 5.0 and is now removed. -Use painless instead. +The Groovy, JavaScript, and Python scripting languages were deprecated in +elasticsearch 5.0 and have now been removed. Use painless instead. 
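For readers migrating scripts, here is a minimal sketch of a loop rewritten in Painless; the `prices` parameter is hypothetical and only shown to illustrate the syntax, not any particular API.

[source,painless]
---------------------------------------------------------
// `params.prices` is a hypothetical user-supplied list passed to the script
def total = 0;
for (def price : params.prices) {
  total += price;
}
return total;
---------------------------------------------------------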
==== Date fields now return dates diff --git a/docs/reference/migration/migrate_6_0/stats.asciidoc b/docs/reference/migration/migrate_6_0/stats.asciidoc index 633604f39ae..ed70d1503c4 100644 --- a/docs/reference/migration/migrate_6_0/stats.asciidoc +++ b/docs/reference/migration/migrate_6_0/stats.asciidoc @@ -5,3 +5,13 @@ Given that store throttling has been removed, the `store` stats do not report `throttle_time` anymore. + +==== FS stats no longer reports if the disk spins + +Elasticsearch has defaulted to assuming that it is running on SSDs since +the 2.x series of Elasticsearch. As such, Elasticsearch no longer needs to +collect information from the operating system as to whether or not the +underlying disks of each data path spin or not. While this functionality was no +longer needed starting in the 2.x series of Elasticsearch, it was maintained in +the filesystem section of the nodes stats APIs. This information has now been +removed. diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index a91a56dd270..c4e7d4d3b48 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -105,6 +105,8 @@ POST /cluster_one:twitter/tweet/_search "match_all": {} } -------------------------------------------------- +// CONSOLE +// TEST[skip:we don't have two clusters set up during docs testing] In contrast to the `tribe` feature cross cluster search can also search indices with the same name on different clusters: @@ -116,6 +118,8 @@ POST /cluster_one:twitter,twitter/tweet/_search "match_all": {} } -------------------------------------------------- +// CONSOLE +// TEST[skip:we don't have two clusters set up during docs testing] Search results are disambiguated the same way as the indices are disambiguated in the request. Even if index names are identical these indices will be treated as different indices when results are merged. All results retrieved from a @@ -124,7 +128,7 @@ will be prefixed with their remote cluster name: [source,js] -------------------------------------------------- - { +{ "took" : 89, "timed_out" : false, "_shards" : { @@ -162,6 +166,7 @@ will be prefixed with their remote cluster name: } } -------------------------------------------------- +// TESTRESPONSE [float] === Cross cluster search settings @@ -188,5 +193,3 @@ will be prefixed with their remote cluster name: to `false` (defaults to `true`) to prevent certain nodes from connecting to remote clusters. Cross-cluster search requests must be sent to a node that is allowed to act as a cross-cluster client. - - diff --git a/docs/reference/modules/scripting/painless-syntax.asciidoc b/docs/reference/modules/scripting/painless-syntax.asciidoc index e3a6ed24bc0..15656d72f2c 100644 --- a/docs/reference/modules/scripting/painless-syntax.asciidoc +++ b/docs/reference/modules/scripting/painless-syntax.asciidoc @@ -207,7 +207,7 @@ In addition to Java's `enhanced for` loop, the `for in` syntax from groovy can a [source,painless] --------------------------------------------------------- -for (item : list) { +for (def item : list) { ... } --------------------------------------------------------- diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 267525b1b3c..909d2cb6d1c 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -3,5 +3,11 @@ [partintro] -- -This section will summarize the changes in released versions. 
+This section summarizes the changes in each release. + +* <<release-notes-6.0.0-alpha1>> +* <<release-notes-6.0.0-alpha1-5x>> + -- +include::release-notes/6.0.0-alpha1.asciidoc[] +include::release-notes/6.0.0-alpha1-5x.asciidoc[] diff --git a/docs/reference/release-notes/6.0.0-alpha1-5x.asciidoc b/docs/reference/release-notes/6.0.0-alpha1-5x.asciidoc new file mode 100644 index 00000000000..699260fd0c8 --- /dev/null +++ b/docs/reference/release-notes/6.0.0-alpha1-5x.asciidoc @@ -0,0 +1,1096 @@ +[[release-notes-6.0.0-alpha1-5x]] +== 6.0.0-alpha1 Release Notes (Changes previously released in 5.x) + +The changes listed below were first released in the 5.x series. Changes +released for the first time in Elasticsearch 6.0.0-alpha1 are listed in +<<release-notes-6.0.0-alpha1>>. + +[[breaking-6.0.0-alpha1-5x]] +[float] +=== Breaking changes + +Aliases:: +* Validate alias names the same as index names {pull}20771[#20771] (issue: {issue}20748[#20748]) + +CRUD:: +* Fixed naming inconsistency for fields/stored_fields in the APIs {pull}20166[#20166] (issues: {issue}18943[#18943], {issue}20155[#20155]) + +Core:: +* Add system call filter bootstrap check {pull}21940[#21940] +* Remove ignore system bootstrap checks {pull}20511[#20511] + +Internal:: +* `_flush` should block by default {pull}20597[#20597] (issue: {issue}20569[#20569]) + +Packaging:: +* Rename service.bat to elasticsearch-service.bat {pull}20496[#20496] (issue: {issue}17528[#17528]) + +Plugin Lang Painless:: +* Remove all date 'now' methods from Painless {pull}20766[#20766] (issue: {issue}20762[#20762]) + +Query DSL:: +* Fix name of `enabled_position_increments` {pull}22895[#22895] + +REST:: +* Change separator for shards preference {pull}20786[#20786] (issues: {issue}20722[#20722], {issue}20769[#20769]) + +Search:: +* Remove DFS_QUERY_AND_FETCH as a search type {pull}22787[#22787] + +Settings:: +* Remove support for default settings {pull}24093[#24093] (issues: {issue}23981[#23981], {issue}24052[#24052], {issue}24074[#24074]) + + + +[[breaking-java-6.0.0-alpha1-5x]] +[float] +=== Breaking Java changes + +Aggregations:: +* Move getProperty method out of MultiBucketsAggregation.Bucket interface {pull}23988[#23988] +* Remove getProperty method from Aggregations interface and impl {pull}23972[#23972] +* Move getProperty method out of Aggregation interface {pull}23949[#23949] + +Allocation:: +* Cluster Explain API uses the allocation process to explain shard allocation decisions {pull}22182[#22182] (issues: {issue}20347[#20347], {issue}20634[#20634], {issue}21103[#21103], {issue}21662[#21662], {issue}21691[#21691]) + +Cluster:: +* Remove PROTO-based custom cluster state components {pull}22336[#22336] (issue: {issue}21868[#21868]) + +Core:: +* Remove ability to plug-in TransportService {pull}20505[#20505] + +Discovery:: +* Remove pluggability of ElectMasterService {pull}21031[#21031] + +Exceptions:: +* Remove `IndexTemplateAlreadyExistsException` and `IndexShardAlreadyExistsException` {pull}21539[#21539] (issue: {issue}21494[#21494]) +* Replace IndexAlreadyExistsException with ResourceAlreadyExistsException {pull}21494[#21494] + +Ingest:: +* Change type of ingest doc meta-data field 'TIMESTAMP' to `Date` {pull}22234[#22234] (issue: {issue}22074[#22074]) + +Internal:: +* Replace SearchExtRegistry with namedObject {pull}22492[#22492] +* Replace Suggesters with namedObject {pull}22491[#22491] +* Consolidate the last easy parser construction {pull}22095[#22095] +* Introduce XContentParser#namedObject {pull}22003[#22003] +* Pass executor name to 
request interceptor to support async intercept calls {pull}21089[#21089] +* Remove TransportService#registerRequestHandler leniency {pull}20469[#20469] (issue: {issue}20468[#20468]) + +Java API:: +* Fold InternalSearchHits and friends into their interfaces {pull}23042[#23042] + +Network:: +* Remove HttpServer and HttpServerAdapter in favor of a simple dispatch method {pull}22636[#22636] (issue: {issue}18482[#18482]) +* Unguice Transport and friends {pull}20526[#20526] + +Plugins:: +* Deguice rest handlers {pull}22575[#22575] +* Plugins: Replace Rest filters with RestHandler wrapper {pull}21905[#21905] +* Plugins: Remove support for onModule {pull}21416[#21416] +* Cleanup sub fetch phase extension point {pull}20382[#20382] + +Query DSL:: +* Resolve index names in indices_boost {pull}21393[#21393] (issue: {issue}4756[#4756]) + +Scripting:: +* Refactor ScriptType to be a Top-Level Class {pull}21136[#21136] + +Search:: +* Remove QUERY_AND_FETCH search type {pull}22996[#22996] +* Cluster search shards improvements: expose ShardId, adjust visibility of some members {pull}21752[#21752] + + + +[[deprecation-6.0.0-alpha1-5x]] +[float] +=== Deprecations + +Java API:: +* Add BulkProcessor methods with XContentType parameter {pull}23078[#23078] (issue: {issue}22691[#22691]) +* Deprecate and remove "minimumNumberShouldMatch" in BoolQueryBuilder {pull}22403[#22403] + +Plugin Repository S3:: +* S3 Repository: Deprecate remaining `repositories.s3.*` settings {pull}24144[#24144] (issue: {issue}24143[#24143]) +* Deprecate specifying credentials through env vars, sys props, and remove profile files {pull}22567[#22567] (issues: {issue}21041[#21041], {issue}22479[#22479]) + +Query DSL:: +* Add deprecation logging message for 'fuzzy' query {pull}20993[#20993] (issue: {issue}15760[#15760]) + +REST:: +* Optionally require a valid content type for all rest requests with content {pull}22691[#22691] (issue: {issue}19388[#19388]) + +Scripting:: +* Change Namespace for Stored Script to Only Use Id {pull}22206[#22206] + +Shadow Replicas:: +* Add a deprecation notice to shadow replicas {pull}22647[#22647] (issue: {issue}22024[#22024]) + +Stats:: +* Deprecate _field_stats endpoint {pull}23914[#23914] + + + +[[feature-6.0.0-alpha1-5x]] +[float] +=== New features + +Aggregations:: +* Initial version of an adjacency matrix using the Filters aggregation {pull}22239[#22239] (issue: {issue}22169[#22169]) + +Analysis:: +* Adds pattern keyword marker filter support {pull}23600[#23600] (issue: {issue}4877[#4877]) +* Expose WordDelimiterGraphTokenFilter {pull}23327[#23327] (issue: {issue}23104[#23104]) +* Synonym Graph Support (LUCENE-6664) {pull}21517[#21517] +* Expose Lucenes Ukrainian analyzer {pull}21176[#21176] (issue: {issue}19433[#19433]) + +CAT API:: +* Provides a cat api endpoint for templates. {pull}20545[#20545] (issue: {issue}20467[#20467]) + +CRUD:: +* Allow an index to be partitioned with custom routing {pull}22274[#22274] (issue: {issue}21585[#21585]) + +Highlighting:: +* Integrate UnifiedHighlighter {pull}21621[#21621] (issue: {issue}21376[#21376]) + +Index APIs:: +* Add FieldCapabilities (_field_caps) API {pull}23007[#23007] (issue: {issue}22438[#22438]) + +Ingest:: +* introduce KV Processor in Ingest Node {pull}22272[#22272] (issue: {issue}22222[#22222]) + +Mapping:: +* Add the ability to set a normalizer on keyword fields. 
{pull}21919[#21919] (issue: {issue}18064[#18064]) +* Add RangeFieldMapper for numeric and date range types {pull}21002[#21002] (issue: {issue}20999[#20999]) + +Plugin Discovery File:: +* File-based discovery plugin {pull}20394[#20394] (issue: {issue}20323[#20323]) + +Query DSL:: +* Add "all fields" execution mode to simple_query_string query {pull}21341[#21341] (issues: {issue}19784[#19784], {issue}20925[#20925]) +* Add support for `quote_field_suffix` to `simple_query_string`. {pull}21060[#21060] (issue: {issue}18641[#18641]) +* Add "all field" execution mode to query_string query {pull}20925[#20925] (issue: {issue}19784[#19784]) + +Reindex API:: +* Add automatic parallelization support to reindex and friends {pull}20767[#20767] (issue: {issue}20624[#20624]) + +Search:: +* Introduce incremental reduction of TopDocs {pull}23946[#23946] +* Add federated cross cluster search capabilities {pull}22502[#22502] (issue: {issue}21473[#21473]) +* Add field collapsing for search request {pull}22337[#22337] (issue: {issue}21833[#21833]) + +Settings:: +* Add infrastructure for elasticsearch keystore {pull}22335[#22335] + +Similarities:: +* Adds boolean similarity to Elasticsearch {pull}23637[#23637] (issue: {issue}6731[#6731]) + + + +[[enhancement-6.0.0-alpha1-5x]] +[float] +=== Enhancements + +Aggregations:: +* Add `count` to rest output of `geo_centroid` {pull}24387[#24387] (issue: {issue}24366[#24366]) +* Allow scripted metric agg to access `_score` {pull}24295[#24295] +* Add BucketMetricValue interface {pull}24188[#24188] +* Move aggs CommonFields and TYPED_KEYS_DELIMITER from InternalAggregation to Aggregation {pull}23987[#23987] +* Use ParseField for aggs CommonFields rather than String {pull}23717[#23717] +* Share XContent rendering code in terms aggs {pull}23680[#23680] +* Add unit tests for ParentToChildAggregator {pull}23305[#23305] (issue: {issue}22278[#22278]) +* First step towards incremental reduction of query responses {pull}23253[#23253] +* `value_type` is useful regardless of scripting. 
{pull}22160[#22160] (issue: {issue}20163[#20163]) +* Support for partitioning set of terms {pull}21626[#21626] (issue: {issue}21487[#21487]) +* Rescorer should be applied in the TopHits aggregation {pull}20978[#20978] (issue: {issue}19317[#19317]) + +Aliases:: +* Handle multiple aliases in _cat/aliases api {pull}23698[#23698] (issue: {issue}23661[#23661]) + +Allocation:: +* Trigger replica recovery restarts by master when primary relocation completes {pull}23926[#23926] (issue: {issue}23904[#23904]) +* Makes the same_shard host dynamically updatable {pull}23397[#23397] (issue: {issue}22992[#22992]) +* Include stale replica shard info when explaining an unassigned primary {pull}22826[#22826] +* Adds setting level to allocation decider explanations {pull}22268[#22268] (issue: {issue}21771[#21771]) +* Improves allocation decider decision explanation messages {pull}21771[#21771] +* Prepares allocator decision objects for use with the allocation explain API {pull}21691[#21691] +* Balance step in BalancedShardsAllocator for a single shard {pull}21103[#21103] +* Process more expensive allocation deciders last {pull}20724[#20724] (issue: {issue}12815[#12815]) +* Separates decision making from decision application in BalancedShardsAllocator {pull}20634[#20634] + +Analysis:: +* Support Keyword type in Analyze API {pull}23161[#23161] +* Expose FlattenGraphTokenFilter {pull}22643[#22643] +* Analyze API Position Length Support {pull}22574[#22574] +* Remove AnalysisService and reduce it to a simple name to analyzer mapping {pull}20627[#20627] (issues: {issue}19827[#19827], {issue}19828[#19828]) + +CAT API:: +* Adding built-in sorting capability to _cat apis. {pull}20658[#20658] (issue: {issue}16975[#16975]) +* Add health status parameter to cat indices API {pull}20393[#20393] + +CRUD:: +* Use correct block levels for TRA subclasses {pull}22224[#22224] +* Make index and delete operation execute as a single bulk item {pull}21964[#21964] + +Cache:: +* Do not cache term queries. {pull}21566[#21566] (issues: {issue}16031[#16031], {issue}20116[#20116]) +* Parse alias filters on the coordinating node {pull}20916[#20916] + +Circuit Breakers:: +* Closing a ReleasableBytesStreamOutput closes the underlying BigArray {pull}23941[#23941] +* Add used memory amount to CircuitBreakingException message (#22521) {pull}22693[#22693] (issue: {issue}22521[#22521]) +* Cluster Settings Updates should not trigger circuit breakers. 
{pull}20827[#20827] + +Cluster:: +* Extract a common base class to allow services to listen to remote cluster config updates {pull}24367[#24367] +* Prevent nodes from joining if newer indices exist in the cluster {pull}23843[#23843] +* Connect to new nodes concurrently {pull}22984[#22984] (issue: {issue}22828[#22828]) +* Keep NodeConnectionsService in sync with current nodes in the cluster state {pull}22509[#22509] +* Add a generic way of checking version before serializing custom cluster object {pull}22376[#22376] (issue: {issue}22313[#22313]) +* Add validation for supported index version on node join, restore, upgrade & open index {pull}21830[#21830] (issue: {issue}21670[#21670]) +* Let ClusterStateObserver only hold onto state that's needed for change detection {pull}21631[#21631] (issue: {issue}21568[#21568]) +* Cache successful shard deletion checks {pull}21438[#21438] +* Remove mutable status field from cluster state {pull}21379[#21379] +* Skip shard management code when updating cluster state on client/tribe nodes {pull}20731[#20731] +* Add clusterUUID to RestMainAction output {pull}20503[#20503] + +Core:: +* Regex upgrades {pull}24316[#24316] (issue: {issue}24226[#24226]) +* Detect remnants of path.data/default.path.data bug {pull}24099[#24099] (issues: {issue}23981[#23981], {issue}24052[#24052], {issue}24074[#24074], {issue}24093[#24093]) +* Await termination after shutting down executors {pull}23889[#23889] +* Add early-access check {pull}23743[#23743] (issue: {issue}23668[#23668]) +* Adapter action future should restore interrupts {pull}23618[#23618] (issue: {issue}23617[#23617]) +* Disable bootstrap checks for single-node discovery {pull}23598[#23598] (issues: {issue}23585[#23585], {issue}23595[#23595]) +* Enable explicitly enforcing bootstrap checks {pull}23585[#23585] (issue: {issue}21864[#21864]) +* Add equals/hashcode method to ReplicationResponse {pull}23215[#23215] +* Simplify ElasticsearchException rendering as a XContent {pull}22611[#22611] +* Remove setLocalNode from ClusterService and TransportService {pull}22608[#22608] +* Rename bootstrap.seccomp to bootstrap.system_call_filter {pull}22226[#22226] (issue: {issue}21940[#21940]) +* Cleanup random stats serialization code {pull}22223[#22223] +* Avoid corruption when deserializing booleans {pull}22152[#22152] +* Reduce memory pressure when sending large terms queries. 
{pull}21776[#21776] +* Install a security manager on startup {pull}21716[#21716] +* Log node ID on startup {pull}21673[#21673] +* Ensure source filtering automatons are only compiled once {pull}20857[#20857] (issue: {issue}20839[#20839]) +* Improve scheduling fairness when batching cluster state changes with equal priority {pull}20775[#20775] (issue: {issue}20768[#20768]) +* Add production warning for pre-release builds {pull}20674[#20674] +* Add serial collector bootstrap check {pull}20558[#20558] +* Do not log full bootstrap checks exception {pull}19989[#19989] + +Dates:: +* Improve error handling for epoch format parser with time zone (#22621) {pull}23689[#23689] + +Discovery:: +* Introduce single-node discovery {pull}23595[#23595] +* UnicastZenPing shouldn't ping the address of the local node {pull}23567[#23567] +* MasterFaultDetection can start after the initial cluster state has been processed {pull}23037[#23037] (issue: {issue}22828[#22828]) +* Simplify Unicast Zen Ping {pull}22277[#22277] (issues: {issue}19370[#19370], {issue}21739[#21739], {issue}22120[#22120], {issue}22194[#22194]) +* Prefer joining node with conflicting transport address when becoming master {pull}22134[#22134] (issues: {issue}22049[#22049], {issue}22120[#22120]) + +Engine:: +* Engine: store maxUnsafeAutoIdTimestamp in commit {pull}24149[#24149] +* Replace EngineClosedException with AlreadyClosedException {pull}22631[#22631] + +Exceptions:: +* Add BWC layer for Exceptions {pull}21694[#21694] (issue: {issue}21656[#21656]) + +Geo:: +* Optimize geo-distance sorting. {pull}20596[#20596] (issue: {issue}20450[#20450]) + +Highlighting:: +* Add support for fragment_length in the unified highlighter {pull}23431[#23431] +* Add BreakIteratorBoundaryScanner support {pull}23248[#23248] + +Index APIs:: +* Open and close index to honour allow_no_indices option {pull}24222[#24222] (issue: {issue}24031[#24031]) +* Wildcard cluster names for cross cluster search {pull}23985[#23985] (issue: {issue}23893[#23893]) +* Indexing: Add shard id to indexing operation listener {pull}22606[#22606] +* Better error when can't auto create index {pull}22488[#22488] (issues: {issue}21448[#21448], {issue}22435[#22435]) +* Add date-math support to `_rollover` {pull}20709[#20709] + +Ingest:: +* Lazy load the geoip databases {pull}23337[#23337] +* add `ignore_missing` flag to ingest plugins {pull}22273[#22273] +* Added ability to remove pipelines via wildcards (#22149) {pull}22191[#22191] (issue: {issue}22149[#22149]) +* Enables the ability to inject serialized json fields into root of document {pull}22179[#22179] (issue: {issue}21898[#21898]) +* compile ScriptProcessor inline scripts when creating ingest pipelines {pull}21858[#21858] (issue: {issue}21842[#21842]) +* add `ignore_missing` option to SplitProcessor {pull}20982[#20982] (issues: {issue}19995[#19995], {issue}20840[#20840]) +* add ignore_missing option to convert,trim,lowercase,uppercase,grok,rename {pull}20194[#20194] (issue: {issue}19995[#19995]) +* introduce the JSON Processor {pull}20128[#20128] (issue: {issue}20052[#20052]) + +Internal:: +* Log JVM arguments on startup {pull}24451[#24451] +* Move RemoteClusterService into TransportService {pull}24424[#24424] +* Enum related performance additions. 
{pull}24274[#24274] (issue: {issue}24226[#24226]) +* Add a dedicated TransportRemoteInfoAction for consistency {pull}24040[#24040] (issue: {issue}23969[#23969]) +* Simplify sorted top docs merging in SearchPhaseController {pull}23881[#23881] +* Synchronized CollapseTopFieldDocs with Lucene's relatives {pull}23854[#23854] +* Cleanup SearchPhaseController interface {pull}23844[#23844] +* Do not create String instances in 'Strings' methods accepting StringBuilder {pull}22907[#22907] +* Improve connection closing in `RemoteClusterConnection` {pull}22804[#22804] (issue: {issue}22803[#22803]) +* Remove some more usages of ParseFieldMatcher {pull}22437[#22437] (issues: {issue}19552[#19552], {issue}22130[#22130]) +* Remove some more usages of ParseFieldMatcher {pull}22398[#22398] (issues: {issue}19552[#19552], {issue}22130[#22130]) +* Remove some more usages of ParseFieldMatcher {pull}22395[#22395] (issues: {issue}19552[#19552], {issue}22130[#22130]) +* Remove some ParseFieldMatcher usages {pull}22389[#22389] (issues: {issue}19552[#19552], {issue}22130[#22130]) +* Introduce ToXContentObject interface {pull}22387[#22387] (issue: {issue}16347[#16347]) +* Add infrastructure to manage network connections outside of Transport/TransportService {pull}22194[#22194] +* Replace strict parsing mode with response headers assertions {pull}22130[#22130] (issues: {issue}11859[#11859], {issue}19552[#19552], {issue}20993[#20993]) +* Start using `ObjectParser` for aggs. {pull}22048[#22048] (issue: {issue}22009[#22009]) +* Don't output null source node in RecoveryFailedException {pull}21963[#21963] +* ClusterService should expose "applied" cluster states (i.e., remove ClusterStateStatus) {pull}21817[#21817] +* Rename ClusterState#lookupPrototypeSafe to `lookupPrototype` and remove "unsafe" unused variant {pull}21686[#21686] +* ShardActiveResponseHandler shouldn't hold to an entire cluster state {pull}21470[#21470] (issue: {issue}21394[#21394]) +* Remove unused ClusterService dependency from SearchPhaseController {pull}21421[#21421] +* Remove special case in case no action filters are registered {pull}21251[#21251] +* Use TimeValue instead of long for CacheBuilder methods {pull}20887[#20887] +* Remove SearchContext#current and all its threadlocals {pull}20778[#20778] (issue: {issue}19341[#19341]) +* Remove poor-mans compression in InternalSearchHit and friends {pull}20472[#20472] + +Java API:: +* Added types options to DeleteByQueryRequest {pull}23265[#23265] (issue: {issue}21984[#21984]) +* prevent NPE when trying to uncompress a null BytesReference {pull}22386[#22386] + +Java High Level REST Client:: +* Add utility method to parse named XContent objects with typed prefix {pull}24240[#24240] (issue: {issue}22965[#22965]) +* Convert suggestion response parsing to use NamedXContentRegistry {pull}23355[#23355] +* UpdateRequest implements ToXContent {pull}23289[#23289] +* Add javadoc for DocWriteResponse.Builders {pull}23267[#23267] +* Expose WriteRequest.RefreshPolicy string representation {pull}23106[#23106] +* Use `typed_keys` parameter to prefix suggester names by type in search responses {pull}23080[#23080] (issue: {issue}22965[#22965]) +* Add parsing from xContent to MainResponse {pull}22934[#22934] +* Parse elasticsearch exception's root causes {pull}22924[#22924] +* Add parsing method to BytesRestResponse's error {pull}22873[#22873] +* Add parsing methods to BulkItemResponse {pull}22859[#22859] +* Add parsing method for ElasticsearchException.generateFailureXContent() {pull}22815[#22815] +* Add parsing method 
for ElasticsearchException.generateThrowableXContent() {pull}22783[#22783] +* Add parsing methods for UpdateResponse {pull}22586[#22586] +* Add parsing from xContent to InternalSearchHit and InternalSearchHits {pull}22429[#22429] +* Add fromxcontent methods to index response {pull}22229[#22229] +* Add fromXContent() methods for ReplicationResponse {pull}22196[#22196] (issue: {issue}22082[#22082]) +* Add parsing method for ElasticsearchException {pull}22143[#22143] +* Add fromXContent method to GetResponse {pull}22082[#22082] + +Java REST Client:: +* move ignore parameter support from yaml test client to low level rest client {pull}22637[#22637] +* Warn log deprecation warnings received from server {pull}21895[#21895] +* Support Preemptive Authentication with RestClient {pull}21336[#21336] +* Provide error message when rest request path is null {pull}21233[#21233] (issue: {issue}21232[#21232]) + +Logging:: +* Log deleting indices at info level {pull}22627[#22627] (issue: {issue}22605[#22605]) +* Expose logs base path {pull}22625[#22625] +* Log failure to connect to node at info instead of debug {pull}21809[#21809] (issue: {issue}6468[#6468]) +* Truncate log messages from the end {pull}21609[#21609] (issue: {issue}21602[#21602]) +* Ensure logging is initialized in CLI tools {pull}20575[#20575] +* Give useful error message if log config is missing {pull}20493[#20493] +* Complete Elasticsearch logger names {pull}20457[#20457] (issue: {issue}20326[#20326]) +* Logging shutdown hack {pull}20389[#20389] (issue: {issue}20304[#20304]) +* Disable console logging {pull}20387[#20387] +* Warn on not enough masters during election {pull}20063[#20063] (issue: {issue}8362[#8362]) + +Mapping:: +* Only allow one type on 6.0 indices {pull}24317[#24317] (issue: {issue}15613[#15613]) +* token_count type : add an option to count tokens (fix #23227) {pull}24175[#24175] (issue: {issue}23227[#23227]) +* Atomic mapping updates across types {pull}22220[#22220] +* Only update DocumentMapper if field type changes {pull}22165[#22165] +* Better error message when _parent isn't an object {pull}21987[#21987] +* Create the QueryShardContext lazily in DocumentMapperParser. {pull}21287[#21287] + +Nested Docs:: +* Avoid adding unnecessary nested filters when ranges are used. 
{pull}23427[#23427] + +Network:: +* Set available processors for Netty {pull}24420[#24420] (issue: {issue}6224[#6224]) +* Adjust default Netty receive predictor size to 64k {pull}23542[#23542] (issue: {issue}23185[#23185]) +* Keep the pipeline handler queue small initially {pull}23335[#23335] +* Set network receive predictor size to 32kb {pull}23284[#23284] (issue: {issue}23185[#23185]) +* TransportService.connectToNode should validate remote node ID {pull}22828[#22828] (issue: {issue}22194[#22194]) +* Disable the Netty recycler {pull}22452[#22452] (issues: {issue}22189[#22189], {issue}22360[#22360], {issue}22406[#22406], {issue}5904[#5904]) +* Tell Netty not to be unsafe in transport client {pull}22284[#22284] +* Introduce a low level protocol handshake {pull}22094[#22094] +* Detach handshake from connect to node {pull}22037[#22037] +* Reduce number of connections per node depending on the nodes role {pull}21849[#21849] +* Add a connect timeout to the ConnectionProfile to allow per node connect timeouts {pull}21847[#21847] (issue: {issue}19719[#19719]) +* Grant Netty permission to read system somaxconn {pull}21840[#21840] +* Remove connectToNodeLight and replace it with a connection profile {pull}21799[#21799] +* Lazy resolve unicast hosts {pull}21630[#21630] (issues: {issue}14441[#14441], {issue}16412[#16412]) +* Fix handler name on message not fully read {pull}21478[#21478] +* Handle rejected pings on shutdown gracefully {pull}20842[#20842] +* Network: Allow to listen on virtual interfaces. {pull}19568[#19568] (issues: {issue}17473[#17473], {issue}19537[#19537]) + +Packaging:: +* Introduce Java version check {pull}23194[#23194] (issue: {issue}21102[#21102]) +* Improve the out-of-the-box experience {pull}21920[#21920] (issues: {issue}18317[#18317], {issue}21783[#21783]) +* Add empty plugins dir for archive distributions {pull}21204[#21204] (issue: {issue}20342[#20342]) +* Make explicit missing settings for Windows service {pull}21200[#21200] (issue: {issue}18317[#18317]) +* Change permissions on config files {pull}20966[#20966] +* Add quiet option to disable console logging {pull}20422[#20422] (issues: {issue}15315[#15315], {issue}16159[#16159], {issue}17220[#17220]) + +Percolator:: +* Allowing range queries with now ranges inside percolator queries {pull}23921[#23921] (issue: {issue}23859[#23859]) +* Add term extraction support for MultiPhraseQuery {pull}23176[#23176] + +Plugin Discovery EC2:: +* Settings: Migrate ec2 discovery sensitive settings to elasticsearch keystore {pull}23961[#23961] (issue: {issue}22475[#22475]) +* Add support for ca-central-1 region to EC2 and S3 plugins {pull}22458[#22458] (issue: {issue}22454[#22454]) +* Support for eu-west-2 (London) cloud-aws plugin {pull}22308[#22308] (issue: {issue}22306[#22306]) +* Add us-east-2 AWS region {pull}21961[#21961] (issue: {issue}21881[#21881]) +* Add setting to set read timeout for EC2 discovery and S3 repository plugins {pull}21956[#21956] (issue: {issue}19078[#19078]) + +Plugin Ingest GeoIp:: +* Cache results of geoip lookups {pull}22231[#22231] (issue: {issue}22074[#22074]) + +Plugin Lang Painless:: +* Allow painless to load stored fields {pull}24290[#24290] +* Start on custom whitelists for Painless {pull}23563[#23563] +* Fix Painless's implementation of interfaces returning primitives {pull}23298[#23298] (issue: {issue}22983[#22983]) +* Allow painless to implement more interfaces {pull}22983[#22983] +* Generate reference links for painless API {pull}22775[#22775] +* Painless: Add augmentation to String for base 64 
{pull}22665[#22665] (issue: {issue}22648[#22648]) +* Improve painless's ScriptException generation {pull}21762[#21762] (issue: {issue}21733[#21733]) +* Add Debug.explain to painless {pull}21723[#21723] (issue: {issue}20263[#20263]) +* Implement the ?: operator in painless {pull}21506[#21506] +* In painless suggest a long constant if int won't do {pull}21415[#21415] (issue: {issue}21313[#21313]) +* Support decimal constants with trailing [dD] in painless {pull}21412[#21412] (issue: {issue}21116[#21116]) +* Implement reading from null safe dereferences {pull}21239[#21239] +* Painless negative offsets {pull}21080[#21080] (issue: {issue}20870[#20870]) +* Remove more equivalents of the now method from the Painless whitelist. {pull}21047[#21047] +* Disable regexes by default in painless {pull}20427[#20427] (issue: {issue}20397[#20397]) + +Plugin Repository Azure:: +* Add Backoff policy to azure repository {pull}23387[#23387] (issue: {issue}22728[#22728]) + +Plugin Repository S3:: +* Removes the retry mechanism from the S3 blob store {pull}23952[#23952] (issue: {issue}22845[#22845]) +* S3 Repository: Eagerly load static settings {pull}23910[#23910] +* S3 repository: Add named configurations {pull}22762[#22762] (issues: {issue}22479[#22479], {issue}22520[#22520]) +* Make the default S3 buffer size depend on the available memory. {pull}21299[#21299] + +Plugins:: +* Plugins: Add support for platform specific plugins {pull}24265[#24265] +* Plugins: Remove leniency for missing plugins dir {pull}24173[#24173] +* Modify permissions dialog for plugins {pull}23742[#23742] +* Plugins: Add plugin cli specific exit codes {pull}23599[#23599] (issue: {issue}15295[#15295]) +* Plugins: Output better error message when existing plugin is incompatible {pull}23562[#23562] (issue: {issue}20691[#20691]) +* Add the ability to define search response listeners in search plugin {pull}22682[#22682] +* Pass ThreadContext to transport interceptors to allow header modification {pull}22618[#22618] (issue: {issue}22585[#22585]) +* Provide helpful error message if a plugin exists {pull}22305[#22305] (issue: {issue}22084[#22084]) +* Add shutdown hook for closing CLI commands {pull}22126[#22126] (issue: {issue}22111[#22111]) +* Allow plugins to install bootstrap checks {pull}22110[#22110] +* Clarify that plugins can be closed {pull}21669[#21669] +* Plugins: Convert custom discovery to pull based plugin {pull}21398[#21398] +* Removing plugin that isn't installed shouldn't trigger usage information {pull}21272[#21272] (issue: {issue}21250[#21250]) +* Remove pluggability of ZenPing {pull}21049[#21049] +* Make UnicastHostsProvider extension pull based {pull}21036[#21036] +* Revert "Display plugins versions" {pull}20807[#20807] (issues: {issue}18683[#18683], {issue}20668[#20668]) +* Provide error message when plugin id is missing {pull}20660[#20660] + +Query DSL:: +* Make it possible to validate a query on all shards instead of a single random shard {pull}23697[#23697] (issue: {issue}18254[#18254]) +* QueryString and SimpleQueryString Graph Support {pull}22541[#22541] +* Additional Graph Support in Match Query {pull}22503[#22503] (issue: {issue}22490[#22490]) +* RangeQuery WITHIN case now normalises query {pull}22431[#22431] (issue: {issue}22412[#22412]) +* Un-deprecate fuzzy query {pull}22088[#22088] (issue: {issue}15760[#15760]) +* support numeric bounds with decimal parts for long/integer/short/byte datatypes {pull}21972[#21972] (issue: {issue}21600[#21600]) +* Using ObjectParser in MatchAllQueryBuilder and IdsQueryBuilder 
{pull}21273[#21273] +* Expose splitOnWhitespace in `Query String Query` {pull}20965[#20965] (issue: {issue}20841[#20841]) +* Throw error if query element doesn't end with END_OBJECT {pull}20528[#20528] (issue: {issue}20515[#20515]) +* Remove `lowercase_expanded_terms` and `locale` from query-parser options. {pull}20208[#20208] (issue: {issue}9978[#9978]) + +REST:: +* Allow passing single scrollID in clear scroll API body {pull}24242[#24242] (issue: {issue}24233[#24233]) +* Validate top-level keys when parsing mget requests {pull}23746[#23746] (issue: {issue}23720[#23720]) +* Cluster stats should not render empty http/transport types {pull}23735[#23735] +* Add parameter to prefix aggs name with type in search responses {pull}22965[#22965] +* Add a REST spec for the create API {pull}20924[#20924] +* Add response params to REST params did you mean {pull}20753[#20753] (issues: {issue}20722[#20722], {issue}20747[#20747]) +* Add did you mean to strict REST params {pull}20747[#20747] (issue: {issue}20722[#20722]) + +Reindex API:: +* Increase visibility of doExecute so it can be used directly {pull}22614[#22614] +* Improve error message when reindex-from-remote gets bad json {pull}22536[#22536] (issue: {issue}22330[#22330]) +* Reindex: Better error message for pipeline in wrong place {pull}21985[#21985] +* Timeout improvements for rest client and reindex {pull}21741[#21741] (issue: {issue}21707[#21707]) +* Add "simple match" support for reindex-from-remote whitelist {pull}21004[#21004] +* Make reindex-from-remote ignore unknown fields {pull}20591[#20591] (issue: {issue}20504[#20504]) + +Scripting:: +* Expose multi-valued dates to scripts and document painless's date functions {pull}22875[#22875] (issue: {issue}22162[#22162]) +* Wrap VerifyError in ScriptException {pull}21769[#21769] +* Log ScriptException's xcontent if file script compilation fails {pull}21767[#21767] (issue: {issue}21733[#21733]) +* Support binary field type in script values {pull}21484[#21484] (issue: {issue}14469[#14469]) +* Mustache: Add {{#url}}{{/url}} function to URL encode strings {pull}20838[#20838] +* Expose `ctx._now` in update scripts {pull}20835[#20835] (issue: {issue}17895[#17895]) + +Search:: +* Remove leniency when merging fetched hits in a search response phase {pull}24158[#24158] +* Set shard count limit to unlimited {pull}24012[#24012] +* Streamline shard index availability in all SearchPhaseResults {pull}23788[#23788] +* Search took time should use a relative clock {pull}23662[#23662] +* Prevent negative `from` parameter in SearchSourceBuilder {pull}23358[#23358] (issue: {issue}23324[#23324]) +* Remove unnecessary result sorting in SearchPhaseController {pull}23321[#23321] +* Expose `batched_reduce_size` via `_search` {pull}23288[#23288] (issue: {issue}23253[#23253]) +* Adding fromXContent to Suggest and Suggestion class {pull}23226[#23226] (issue: {issue}23202[#23202]) +* Adding fromXContent to Suggestion.Entry and subclasses {pull}23202[#23202] +* Add CollapseSearchPhase as a successor for the FetchSearchPhase {pull}23165[#23165] +* Integrate IndexOrDocValuesQuery. {pull}23119[#23119] +* Detach SearchPhases from AbstractSearchAsyncAction {pull}23118[#23118] +* Fix GraphQuery expectation after Lucene upgrade to 6.5 {pull}23117[#23117] (issue: {issue}23102[#23102]) +* Nested queries should avoid adding unnecessary filters when possible. 
{pull}23079[#23079] (issue: {issue}20797[#20797]) +* Add xcontent parsing to completion suggestion option {pull}23071[#23071] +* Add xcontent parsing to suggestion options {pull}23018[#23018] +* Separate reduce (aggs, suggest and profile) from merging fetched hits {pull}23017[#23017] +* Add a setting to disable remote cluster connections on a node {pull}23005[#23005] +* First step towards separating individual search phases {pull}22802[#22802] +* Add parsing from xContent to SearchProfileShardResults and nested classes {pull}22649[#22649] +* Move SearchTransportService and SearchPhaseController creation outside of TransportSearchAction constructor {pull}21754[#21754] +* Don't carry ShardRouting around when not needed in AbstractSearchAsyncAction {pull}21753[#21753] +* ShardSearchRequest to take ShardId constructor argument rather than the whole ShardRouting {pull}21750[#21750] +* Use index uuid as key in the alias filter map rather than the index name {pull}21749[#21749] +* Add indices and filter information to search shards api output {pull}21738[#21738] (issue: {issue}20916[#20916]) +* remove pointless catch exception in TransportSearchAction {pull}21689[#21689] +* Optimize query with types filter in the URL (t/t/_search) {pull}20979[#20979] +* Makes search action cancelable by task management API {pull}20405[#20405] + +Search Templates:: +* Add profile and explain parameters to template API {pull}20451[#20451] + +Settings:: +* Add secure file setting to keystore {pull}24001[#24001] +* Add a setting which specifies a list of setting {pull}23883[#23883] +* Add a property to mark setting as final {pull}23872[#23872] +* Remove obsolete index setting `index.version.minimum_compatible`. {pull}23593[#23593] +* Provide a method to retrieve a closeable char[] from a SecureString {pull}23389[#23389] +* Update indices settings api to support CBOR and SMILE format {pull}23309[#23309] (issues: {issue}23242[#23242], {issue}23245[#23245]) +* Improve setting deprecation message {pull}23156[#23156] (issue: {issue}22849[#22849]) +* Add secure settings validation on startup {pull}22894[#22894] +* Allow comma delimited array settings to have a space after each entry {pull}22591[#22591] (issue: {issue}22297[#22297]) +* Allow affix settings to be dynamic / updatable {pull}22526[#22526] +* Allow affix settings to delegate to actual settings {pull}22523[#22523] +* Make s3 repository sensitive settings use secure settings {pull}22479[#22479] +* Speed up filter and prefix settings operations {pull}22249[#22249] +* Add precise logging on unknown or invalid settings {pull}20951[#20951] (issue: {issue}20946[#20946]) + +Snapshot/Restore:: +* Ensure every repository has an incompatible-snapshots blob {pull}24403[#24403] (issue: {issue}22267[#22267]) +* Change snapshot status error to use generic SnapshotException {pull}24355[#24355] (issue: {issue}24225[#24225]) +* Duplicate snapshot name throws InvalidSnapshotNameException {pull}22921[#22921] (issue: {issue}18228[#18228]) +* Fixes retrieval of the latest snapshot index blob {pull}22700[#22700] +* Use general cluster state batching mechanism for snapshot state updates {pull}22528[#22528] (issue: {issue}14899[#14899]) +* Synchronize snapshot deletions on the cluster state {pull}22313[#22313] (issue: {issue}19957[#19957]) +* Abort snapshots on a node that leaves the cluster {pull}21084[#21084] (issue: {issue}20876[#20876]) + +Stats:: +* Show JVM arguments {pull}24450[#24450] +* Add cross-cluster search remote cluster info API {pull}23969[#23969] (issue: 
{issue}23925[#23925]) +* Add geo_point to FieldStats {pull}21947[#21947] (issue: {issue}20707[#20707]) +* Include unindexed field in FieldStats response {pull}21821[#21821] (issue: {issue}21952[#21952]) +* Remove load average leniency {pull}21380[#21380] +* Strengthen handling of unavailable cgroup stats {pull}21094[#21094] (issue: {issue}21029[#21029]) +* Add basic cgroup CPU metrics {pull}21029[#21029] + +Suggesters:: +* Provide informative error message in case of unknown suggestion context. {pull}24241[#24241] +* Allow different data types for category in Context suggester {pull}23491[#23491] (issue: {issue}22358[#22358]) + +Task Manager:: +* Allow task to be unregistered by ClusterStateApplier {pull}23931[#23931] +* Limit IndexRequest toString() length {pull}22832[#22832] +* Improve the error message if task and node isn't found {pull}22062[#22062] (issue: {issue}22027[#22027]) +* Add descriptions to create snapshot and restore snapshot tasks. {pull}21901[#21901] (issue: {issue}21768[#21768]) +* Add proper descriptions to reindex, update-by-query and delete-by-query tasks. {pull}21841[#21841] (issue: {issue}21768[#21768]) +* Add search task descriptions {pull}21740[#21740] + +Tribe Node:: +* Add support for merging custom meta data in tribe node {pull}21552[#21552] (issues: {issue}20544[#20544], {issue}20791[#20791], {issue}9372[#9372]) + + + +[[bug-6.0.0-alpha1-5x]] +[float] +=== Bug fixes + +Aggregations:: +* InternalPercentilesBucket should not rely on ordered percents array {pull}24336[#24336] (issue: {issue}24331[#24331]) +* Align behavior HDR percentiles iterator with percentile() method {pull}24206[#24206] +* The `filter` and `significant_terms` aggregations should parse the `filter` as a filter, not a query. {pull}23797[#23797] +* Completion suggestion should also consider text if prefix/regex is missing {pull}23451[#23451] (issue: {issue}23340[#23340]) +* Fixes the per term error in the terms aggregation {pull}23399[#23399] +* Fixes terms error count for multiple reduce phases {pull}23291[#23291] (issue: {issue}23286[#23286]) +* Fix scaled_float numeric type in aggregations {pull}22351[#22351] (issue: {issue}22350[#22350]) +* Allow terms aggregations on pure boolean scripts. {pull}22201[#22201] (issue: {issue}20941[#20941]) +* Fix numeric terms aggregations with includes/excludes and minDocCount=0 {pull}22141[#22141] (issue: {issue}22140[#22140]) +* Fix `missing` on aggs on `boolean` fields. {pull}22135[#22135] (issue: {issue}22009[#22009]) +* IP range masks exclude the maximum address of the range. {pull}22018[#22018] (issue: {issue}22005[#22005]) +* Fix `other_bucket` on the `filters` agg to be enabled if a key is set. {pull}21994[#21994] (issue: {issue}21951[#21951]) +* Rewrite Queries/Filter in FilterAggregationBuilder and ensure client usage marks query as non-cachable {pull}21303[#21303] (issue: {issue}21301[#21301]) +* Percentiles bucket fails for 100th percentile {pull}21218[#21218] +* Thread safety for scripted significance heuristics {pull}21113[#21113] (issue: {issue}18120[#18120]) +* `ip_range` aggregation should accept null bounds. {pull}21043[#21043] (issue: {issue}21006[#21006]) +* Fixes bug preventing script sort working on top_hits aggregation {pull}21023[#21023] (issue: {issue}21022[#21022]) +* Fixed writeable name from range to geo_distance {pull}20860[#20860] +* Fix date_range aggregation to not cache if now is used {pull}20740[#20740] +* The `top_hits` aggregation should compile scripts only once. 
{pull}20738[#20738] + +Allocation:: +* Cannot force allocate primary to a node where the shard already exists {pull}22031[#22031] (issue: {issue}22021[#22021]) +* Promote shadow replica to primary when initializing primary fails {pull}22021[#22021] +* Trim in-sync allocations set only when it grows {pull}21976[#21976] (issue: {issue}21719[#21719]) +* Allow master to assign primary shard to node that has shard store locked during shard state fetching {pull}21656[#21656] (issue: {issue}19416[#19416]) +* Keep a shadow replicas' allocation id when it is promoted to primary {pull}20863[#20863] (issue: {issue}20650[#20650]) +* IndicesClusterStateService should clean local started when re-assigns an initializing shard with the same aid {pull}20687[#20687] +* IndexRoutingTable.initializeEmpty shouldn't override supplied primary RecoverySource {pull}20638[#20638] (issue: {issue}20637[#20637]) +* Update incoming recoveries stats when shadow replica is reinitialized {pull}20612[#20612] +* `index.routing.allocation.initial_recovery` limits replica allocation {pull}20589[#20589] + +Analysis:: +* AsciiFoldingFilter's multi-term component should never preserve the original token. {pull}21982[#21982] +* Pre-built analysis factories do not implement MultiTermAware correctly. {pull}21981[#21981] +* Can load non-PreBuiltTokenFilter in Analyze API {pull}20396[#20396] +* Named analyzer should close the analyzer that it wraps {pull}20197[#20197] + +Bulk:: +* Reject empty IDs {pull}24118[#24118] (issue: {issue}24116[#24116]) + +CAT API:: +* Consume `full_id` request parameter early {pull}21270[#21270] (issue: {issue}21266[#21266]) + +CRUD:: +* Reject external versioning and explicit version numbers on create {pull}21998[#21998] +* MultiGet should not fail entirely if alias resolves to many indices {pull}20858[#20858] (issue: {issue}20845[#20845]) +* Fixed date math expression support in multi get requests. {pull}20659[#20659] (issue: {issue}17957[#17957]) + +Cache:: +* Invalidate cached query results if query timed out {pull}22807[#22807] (issue: {issue}22789[#22789]) +* Fix the request cache keys to not hold references to the SearchContext. 
{pull}21284[#21284] +* Prevent requests that use scripts or now() from being cached {pull}20750[#20750] (issue: {issue}20645[#20645]) + +Circuit Breakers:: +* ClusterState publishing shouldn't trigger circuit breakers {pull}20986[#20986] (issues: {issue}20827[#20827], {issue}20960[#20960]) + +Cluster:: +* Don't set local node on cluster state used for node join validation {pull}23311[#23311] (issues: {issue}21830[#21830], {issue}3[#3], {issue}4[#4], {issue}6[#6], {issue}9[#9]) +* Allow a cluster state applier to create an observer and wait for a better state {pull}23132[#23132] (issue: {issue}21817[#21817]) +* Cluster allocation explain to never return empty response body {pull}23054[#23054] +* IndicesService handles all exceptions during index deletion {pull}22433[#22433] +* Remove cluster update task when task times out {pull}21578[#21578] (issue: {issue}21568[#21568]) + +Core:: +* Check for default.path.data included in path.data {pull}24285[#24285] (issue: {issue}24283[#24283]) +* Improve performance of extracting warning value {pull}24114[#24114] (issue: {issue}24018[#24018]) +* Reject duplicate settings on the command line {pull}24053[#24053] +* Restrict build info loading to ES jar, not any jar {pull}24049[#24049] (issue: {issue}21955[#21955]) +* Streamline foreign stored context restore and allow to preserve response headers {pull}22677[#22677] (issue: {issue}22647[#22647]) +* Support negative numbers in readVLong {pull}22314[#22314] +* Add a StreamInput#readArraySize method that ensures sane array sizes {pull}21697[#21697] +* Use a buffer to do character to byte conversion in StreamOutput#writeString {pull}21680[#21680] (issue: {issue}21660[#21660]) +* Fix ShardInfo#toString {pull}21319[#21319] +* Protect BytesStreamOutput against overflows of the current number of written bytes. {pull}21174[#21174] (issue: {issue}21159[#21159]) +* Return target index name even if _rollover conditions are not met {pull}21138[#21138] +* .es_temp_file remains after system crash, causing it not to start again {pull}21007[#21007] (issue: {issue}20992[#20992]) +* StoreStatsCache should also ignore AccessDeniedException when checking file size {pull}20790[#20790] (issue: {issue}17580[#17580]) + +Dates:: +* Fix time zone rounding edge case for DST overlaps {pull}21550[#21550] (issue: {issue}20833[#20833]) + +Discovery:: +* ZenDiscovery - only validate min_master_nodes values if local node is master {pull}23915[#23915] (issue: {issue}23695[#23695]) +* Close InputStream when receiving cluster state in PublishClusterStateAction {pull}22711[#22711] +* Do not reply to pings from another cluster {pull}21894[#21894] (issue: {issue}21874[#21874]) +* Add current cluster state version to zen pings and use them in master election {pull}20384[#20384] (issue: {issue}20348[#20348]) + +Engine:: +* Close and flush refresh listeners on shard close {pull}22342[#22342] +* Die with dignity on the Lucene layer {pull}21721[#21721] (issue: {issue}19272[#19272]) +* Fix `InternalEngine#isThrottled` to not always return `false`. {pull}21592[#21592] +* Retrying replication requests on replica doesn't call `onRetry` {pull}21189[#21189] (issue: {issue}20211[#20211]) +* Take refresh IOExceptions into account when catching ACE in InternalEngine {pull}20546[#20546] (issue: {issue}19975[#19975]) + +Exceptions:: +* Stop returning "es." 
internal exception headers as http response headers {pull}22703[#22703] (issue: {issue}17593[#17593]) +* Fixing shard recovery error message to report the number of docs correctly for each node {pull}22515[#22515] (issue: {issue}21893[#21893]) + +Highlighting:: +* Fix FiltersFunctionScoreQuery highlighting {pull}21827[#21827] +* Fix highlighting on a stored keyword field {pull}21645[#21645] (issue: {issue}21636[#21636]) +* Fix highlighting of MultiTermQuery within a FunctionScoreQuery {pull}20400[#20400] (issue: {issue}20392[#20392]) + +Index APIs:: +* Fixes restore of a shrunken index when initial recovery node is gone {pull}24322[#24322] (issue: {issue}24257[#24257]) +* Honor update request timeout {pull}23825[#23825] +* Ensure shrunk indices carry over version information from its source {pull}22469[#22469] (issue: {issue}22373[#22373]) +* Validate the `_rollover` target index name early to also fail if dry_run=true {pull}21330[#21330] (issue: {issue}21149[#21149]) +* Only negate index expression on all indices with preceding wildcard {pull}20898[#20898] (issues: {issue}19800[#19800], {issue}20033[#20033]) +* Fix IndexNotFoundException in multi index search request. {pull}20188[#20188] (issue: {issue}3839[#3839]) + +Index Templates:: +* Fix integer overflows when dealing with templates. {pull}21628[#21628] (issue: {issue}21622[#21622]) + +Ingest:: +* Improve missing ingest processor error {pull}23379[#23379] (issue: {issue}23392[#23392]) +* update _ingest.timestamp to use new ZonedDateTime {pull}23174[#23174] (issue: {issue}23168[#23168]) +* fix date-processor to a new default year for every new pipeline execution {pull}22601[#22601] (issue: {issue}22547[#22547]) +* fix index out of bounds error in KV Processor {pull}22288[#22288] (issue: {issue}22272[#22272]) +* Fixes GrokProcessor's ignorance of named-captures with same name. {pull}22131[#22131] (issue: {issue}22117[#22117]) +* fix trace_match behavior for when there is only one grok pattern {pull}21413[#21413] (issue: {issue}21371[#21371]) +* Stored scripts and ingest node configurations should be included into a snapshot {pull}21227[#21227] (issue: {issue}21184[#21184]) +* make painless the default scripting language for ScriptProcessor {pull}20981[#20981] (issue: {issue}20943[#20943]) +* no null values in ingest configuration error messages {pull}20616[#20616] +* JSON Processor was not properly added {pull}20613[#20613] + +Inner Hits:: +* Replace NestedChildrenQuery with ParentChildrenBlockJoinQuery {pull}24016[#24016] (issue: {issue}24009[#24009]) +* Changed DisMaxQueryBuilder to extract inner hits from leaf queries {pull}23512[#23512] (issue: {issue}23482[#23482]) +* Inner hits and ignore unmapped {pull}21693[#21693] (issue: {issue}21620[#21620]) +* Skip adding a parent field to nested documents. {pull}21522[#21522] (issue: {issue}21503[#21503]) + +Internal:: +* Add infrastructure to mark contexts as system contexts {pull}23830[#23830] +* Always restore the ThreadContext for operations delayed due to a block {pull}23349[#23349] +* Index creation and setting update may not return deprecation logging {pull}22702[#22702] +* Rethrow ExecutionException from the loader to concurrent callers of Cache#computeIfAbsent {pull}21549[#21549] +* Restore thread's original context before returning to the ThreadPool {pull}21411[#21411] +* Fix NPE in SearchContext.toString() {pull}21069[#21069] +* Prevent AbstractArrays from release bytes more than once {pull}20819[#20819] +* Source filtering should treat dots in field names as sub objects. 
{pull}20736[#20736] (issue: {issue}20719[#20719]) +* IndicesAliasesRequest should not implement CompositeIndicesRequest {pull}20726[#20726] +* Ensure elasticsearch doesn't start with unsupported indices {pull}20514[#20514] (issue: {issue}20512[#20512]) + +Java API:: +* Don't output empty ext object in SearchSourceBuilder#toXContent {pull}22093[#22093] (issue: {issue}20969[#20969]) +* Transport client: Fix remove address to actually work {pull}21743[#21743] +* Add a HostFailureListener to notify client code if a node got disconnected {pull}21709[#21709] (issue: {issue}21424[#21424]) +* Fix InternalSearchHit#hasSource to return the proper boolean value {pull}21441[#21441] (issue: {issue}21419[#21419]) +* Null checked for source when calling sourceRef {pull}21431[#21431] (issue: {issue}19279[#19279]) +* ClusterAdminClient.prepareDeletePipeline method should accept pipeline id to delete {pull}21228[#21228] +* fix IndexResponse#toString to print out shards info {pull}20562[#20562] + +Java High Level REST Client:: +* Correctly parse BulkItemResponse.Failure's status {pull}23432[#23432] + +Java REST Client:: +* Make buffer limit configurable in HeapBufferedConsumerFactory {pull}23970[#23970] (issue: {issue}23958[#23958]) +* RestClient asynchronous execution should not throw exceptions {pull}23307[#23307] +* Don't use null charset in RequestLogger {pull}22197[#22197] (issue: {issue}22190[#22190]) +* Rest client: don't reuse the same HttpAsyncResponseConsumer across multiple retries {pull}21378[#21378] + +Logging:: +* Do not prematurely shutdown Log4j {pull}21519[#21519] (issue: {issue}21514[#21514]) +* Assert status logger does not warn on Log4j usage {pull}21339[#21339] +* Fix logger names for Netty {pull}21223[#21223] (issue: {issue}20457[#20457]) +* Fix logger when you can not create an azure storage client {pull}20670[#20670] (issues: {issue}20633[#20633], {issue}20669[#20669]) +* Avoid unnecessary creation of prefix loggers {pull}20571[#20571] (issue: {issue}20570[#20570]) +* Fix logging hierarchy configs {pull}20463[#20463] +* Fix prefix logging {pull}20429[#20429] + +Mapping:: +* Preserve response headers when creating an index {pull}23950[#23950] (issue: {issue}23947[#23947]) +* Improves disabled fielddata error message {pull}23841[#23841] (issue: {issue}22768[#22768]) +* Fix MapperService StackOverflowError {pull}23605[#23605] (issue: {issue}23604[#23604]) +* Fix NPE with scaled floats stats when field is not indexed {pull}23528[#23528] (issue: {issue}23487[#23487]) +* Range types causing `GetFieldMappingsIndexRequest` to fail due to `NullPointerException` in `RangeFieldMapper.doXContentBody` when `include_defaults=true` is on the query string {pull}22925[#22925] +* Disallow introducing illegal object mappings (double '..') {pull}22891[#22891] (issue: {issue}22794[#22794]) +* The `_all` default mapper is not completely configured. {pull}22236[#22236] +* Fix MapperService.allEnabled(). {pull}22227[#22227] +* Dynamic `date` fields should use the `format` that was used to detect it is a date. {pull}22174[#22174] (issue: {issue}9410[#9410]) +* Sub-fields should not accept `include_in_all` parameter {pull}21971[#21971] (issue: {issue}21710[#21710]) +* Mappings: Fix get mapping when no indexes exist to not fail in response generation {pull}21924[#21924] (issue: {issue}21916[#21916]) +* Fail to index fields with dots in field names when one of the intermediate objects is nested. 
{pull}21787[#21787] (issue: {issue}21726[#21726]) +* Uncommitted mapping updates should not affect existing indices {pull}21306[#21306] (issue: {issue}21189[#21189]) + +Nested Docs:: +* Fix bug in query builder rewrite that ignores the ignore_unmapped option {pull}22456[#22456] + +Network:: +* Respect promises on pipelined responses {pull}23317[#23317] (issues: {issue}23310[#23310], {issue}23322[#23322]) +* Ensure that releasing listener is called {pull}23310[#23310] +* Pass `forceExecution` flag to transport interceptor {pull}22739[#22739] +* Ensure new connections won't be opened if transport is closed or closing {pull}22589[#22589] (issue: {issue}22554[#22554]) +* Prevent open channel leaks if handshake times out or is interrupted {pull}22554[#22554] +* Execute low level handshake in #openConnection {pull}22440[#22440] +* Handle connection close / reset events gracefully during handshake {pull}22178[#22178] +* Do not lose host information when pinging {pull}21939[#21939] (issue: {issue}21828[#21828]) +* DiscoveryNode and TransportAddress should preserve host information {pull}21828[#21828] +* Die with dignity on the network layer {pull}21720[#21720] (issue: {issue}19272[#19272]) +* Fix connection close header handling {pull}20956[#20956] (issue: {issue}20938[#20938]) +* Ensure port range is readable in the exception message {pull}20893[#20893] +* Prevent double release in TcpTransport if send listener throws an exception {pull}20880[#20880] + +Packaging:: +* Fall back to non-atomic move when removing plugins {pull}23548[#23548] (issue: {issue}35[#35]) +* Another fix for handling of paths on Windows {pull}22132[#22132] (issue: {issue}21921[#21921]) +* Fix handling of spaces in Windows paths {pull}21921[#21921] (issues: {issue}20809[#20809], {issue}21525[#21525]) +* Add option to skip kernel parameters on install {pull}21899[#21899] (issue: {issue}21877[#21877]) +* Set vm.max_map_count on systemd package install {pull}21507[#21507] +* Export ES_JVM_OPTIONS for SysV init {pull}21445[#21445] (issue: {issue}21255[#21255]) +* Debian: configure start-stop-daemon to not go into background {pull}21343[#21343] (issues: {issue}12716[#12716], {issue}21300[#21300]) +* Generate POM files with non-wildcard excludes {pull}21234[#21234] (issue: {issue}21170[#21170]) +* [Packaging] Do not remove scripts directory on upgrade {pull}20452[#20452] +* [Package] Remove bin/lib/modules directories on RPM uninstall/upgrade {pull}20448[#20448] + +Parent/Child:: +* Add null check in case of orphan child document {pull}22772[#22772] (issue: {issue}22770[#22770]) + +Percolator:: +* Fix memory leak when percolator uses bitset or field data cache {pull}24115[#24115] (issue: {issue}24108[#24108]) +* Fix NPE in percolator's 'now' range check for percolator queries with range queries {pull}22356[#22356] (issue: {issue}22355[#22355]) + +Plugin Analysis Stempel:: +* Fix thread safety of Stempel's token filter factory {pull}22610[#22610] (issue: {issue}21911[#21911]) + +Plugin Discovery EC2:: +* Fix ec2 discovery when used with IAM profiles. 
{pull}21048[#21048] (issue: {issue}21039[#21039]) + +Plugin Ingest GeoIp:: +* [ingest-geoip] update geoip to not include null-valued results from {pull}20455[#20455] + +Plugin Lang Painless:: +* painless: Fix method references to ctor with the new LambdaBootstrap and cleanup code {pull}24406[#24406] +* Fix Painless Lambdas for Java 9 {pull}24070[#24070] (issue: {issue}23473[#23473]) +* Fix painless's regex lexer and error messages {pull}23634[#23634] +* Replace Painless's Cast with casting strategies {pull}23369[#23369] +* Fix Bad Casts In Painless {pull}23282[#23282] (issue: {issue}23238[#23238]) +* Don't allow casting from void to def in painless {pull}22969[#22969] (issue: {issue}22908[#22908]) +* Fix def invoked qualified method refs {pull}22918[#22918] +* Whitelist some ScriptDocValues in painless {pull}22600[#22600] (issue: {issue}22584[#22584]) +* Update Painless Loop Counter to be Higher {pull}22560[#22560] (issue: {issue}22508[#22508]) +* Fix some issues with painless's strings {pull}22393[#22393] (issue: {issue}22372[#22372]) +* Test fix for def equals in Painless {pull}21945[#21945] (issue: {issue}21801[#21801]) +* Fix a VerifyError bug in Painless {pull}21765[#21765] +* Fix Lambdas in Painless to be Able to Use Top-Level Variables Such as params and doc {pull}21635[#21635] (issues: {issue}20869[#20869], {issue}21479[#21479]) +* Fix String Concatenation Bug In Painless {pull}20623[#20623] + +Plugin Repository Azure:: +* Azure blob store's readBlob() method first checks if the blob exists {pull}23483[#23483] (issue: {issue}23480[#23480]) +* Fixes default chunk size for Azure repositories {pull}22577[#22577] (issue: {issue}22513[#22513]) +* readonly on azure repository must be taken into account {pull}22055[#22055] (issues: {issue}22007[#22007], {issue}22053[#22053]) + +Plugin Repository S3:: +* Handle BlobPath's trailing separator case. 
Add test cases to BlobPathTests.java {pull}23091[#23091] +* Fixes leading forward slash in S3 repository base_path {pull}20861[#20861] + +Plugins:: +* Fix delete of plugin directory on remove plugin {pull}24266[#24266] (issue: {issue}24252[#24252]) +* Use a marker file when removing a plugin {pull}24252[#24252] (issue: {issue}24231[#24231]) +* Remove hidden file leniency from plugin service {pull}23982[#23982] (issue: {issue}12465[#12465]) +* Add check for null pluginName in remove command {pull}22930[#22930] (issue: {issue}22922[#22922]) +* Use sysprop like with es.path.home to pass conf dir {pull}18870[#18870] (issue: {issue}18689[#18689]) + +Query DSL:: +* FuzzyQueryBuilder should error when parsing array of values {pull}23762[#23762] (issue: {issue}23759[#23759]) +* Fix parsing for `max_determinized_states` {pull}22749[#22749] (issue: {issue}22722[#22722]) +* Fix script score function that combines _score and weight {pull}22713[#22713] (issue: {issue}21483[#21483]) +* Fixes date range query using epoch with timezone {pull}21542[#21542] (issue: {issue}21501[#21501]) +* Allow overriding all-field leniency when `lenient` option is specified {pull}21504[#21504] (issues: {issue}20925[#20925], {issue}21341[#21341]) +* Max score should be updated when a rescorer is used {pull}20977[#20977] (issue: {issue}20651[#20651]) +* Fixes MultiMatchQuery so that it doesn't provide a null context {pull}20882[#20882] +* Fix silently accepting malformed queries {pull}20515[#20515] (issue: {issue}20500[#20500]) +* Fix match_phrase_prefix query with single term on _all field {pull}20471[#20471] (issue: {issue}20470[#20470]) + +REST:: +* [API] change wait_for_completion default according to docs {pull}23672[#23672] +* Deprecate request_cache for clear-cache {pull}23638[#23638] (issue: {issue}22748[#22748]) +* HTTP transport stashes the ThreadContext instead of the RestController {pull}23456[#23456] +* Fix date format in warning headers {pull}23418[#23418] (issue: {issue}23275[#23275]) +* Align REST specs for HEAD requests {pull}23313[#23313] (issue: {issue}21125[#21125]) +* Correct warning header to be compliant {pull}23275[#23275] (issue: {issue}22986[#22986]) +* Fix get HEAD requests {pull}23186[#23186] (issue: {issue}21125[#21125]) +* Handle bad HTTP requests {pull}23153[#23153] (issue: {issue}23034[#23034]) +* Fix get source HEAD requests {pull}23151[#23151] (issue: {issue}21125[#21125]) +* Properly encode location header {pull}23133[#23133] (issues: {issue}21057[#21057], {issue}23115[#23115]) +* Fix template HEAD requests {pull}23130[#23130] (issue: {issue}21125[#21125]) +* Fix index HEAD requests {pull}23112[#23112] (issue: {issue}21125[#21125]) +* Fix alias HEAD requests {pull}23094[#23094] (issue: {issue}21125[#21125]) +* Strict level parsing for indices stats {pull}21577[#21577] (issue: {issue}21024[#21024]) +* The routing query string param is supported by mget but was missing from the rest spec {pull}21357[#21357] +* fix thread_pool_patterns path variable definition {pull}21332[#21332] +* Read indices options in indices upgrade API {pull}21281[#21281] (issue: {issue}21099[#21099]) +* ensure the XContentBuilder is always closed in RestBuilderListener {pull}21124[#21124] +* Add correct Content-Length on HEAD requests {pull}21123[#21123] (issue: {issue}21077[#21077]) +* Make sure HEAD / has 0 Content-Length {pull}21077[#21077] (issue: {issue}21075[#21075]) +* Adds percent-encoding for Location headers {pull}21057[#21057] (issue: {issue}21016[#21016]) +* Whitelist node stats indices level parameter 
{pull}21024[#21024] (issue: {issue}20722[#20722]) +* Remove lenient URL parameter parsing {pull}20722[#20722] (issue: {issue}14719[#14719]) +* XContentBuilder: Avoid building self-referencing objects {pull}20550[#20550] (issues: {issue}19475[#19475], {issue}20540[#20540]) + +Recovery:: +* Provide target allocation id as part of start recovery request {pull}24333[#24333] (issue: {issue}24167[#24167]) +* Fix primary relocation for shadow replicas {pull}22474[#22474] (issue: {issue}20300[#20300]) +* Don't close store under CancellableThreads {pull}22434[#22434] (issue: {issue}22325[#22325]) +* Use a fresh recovery id when retrying recoveries {pull}22325[#22325] (issue: {issue}22043[#22043]) +* Allow flush/force_merge/upgrade on shard marked as relocated {pull}22078[#22078] (issue: {issue}22043[#22043]) +* Fix concurrency issues between cancelling a relocation and marking shard as relocated {pull}20443[#20443] + +Reindex API:: +* Fix throttled reindex_from_remote {pull}23953[#23953] (issues: {issue}23828[#23828], {issue}23945[#23945]) +* Fix reindex with a remote source on a version before 2.0.0 {pull}23805[#23805] +* Make reindex wait for cleanup before responding {pull}23677[#23677] (issue: {issue}23653[#23653]) +* Reindex: do not log when can't clear old scroll {pull}22942[#22942] (issue: {issue}22937[#22937]) +* Fix reindex-from-remote from <2.0 {pull}22931[#22931] (issue: {issue}22893[#22893]) +* Fix reindex from remote clearing scroll {pull}22525[#22525] (issue: {issue}22514[#22514]) +* Fix source filtering in reindex-from-remote {pull}22514[#22514] (issue: {issue}22507[#22507]) +* Remove content type detection from reindex-from-remote {pull}22504[#22504] (issue: {issue}22329[#22329]) +* Don't close rest client from its callback {pull}22061[#22061] (issue: {issue}22027[#22027]) +* Keep context during reindex's retries {pull}21941[#21941] +* Ignore IllegalArgumentException with assertVersionSerializable {pull}21409[#21409] (issues: {issue}20767[#20767], {issue}21350[#21350]) +* Bump reindex-from-remote's buffer to 200mb {pull}21222[#21222] (issue: {issue}21185[#21185]) +* Fix reindex-from-remote for parent/child from <2.0 {pull}21070[#21070] (issue: {issue}21044[#21044]) + +Scripting:: +* Convert script/template objects to json format internally {pull}23308[#23308] (issue: {issue}23245[#23245]) +* Script: Fix value of `ctx._now` to be current epoch time in milliseconds {pull}23175[#23175] (issue: {issue}23169[#23169]) +* Expose `ip` fields as strings in scripts. {pull}21997[#21997] (issue: {issue}21977[#21977]) +* Add support for booleans in scripts {pull}20950[#20950] (issue: {issue}20949[#20949]) +* Native scripts should be created once per index, not per segment. {pull}20609[#20609] + +Search:: +* Cross Cluster Search: propagate original indices per cluster {pull}24328[#24328] +* Query string default field {pull}24214[#24214] +* Speed up parsing of large `terms` queries. {pull}24210[#24210] +* IndicesQueryCache should delegate the scorerSupplier method. 
{pull}24209[#24209] +* Disable graph analysis at query time for shingle and cjk filters producing tokens of different size {pull}23920[#23920] (issue: {issue}23918[#23918]) +* Fix cross-cluster remote node gateway attributes {pull}23863[#23863] +* Use a fixed seed for computing term hashCode in TermsSliceQuery {pull}23795[#23795] +* Honor max concurrent searches in multi-search {pull}23538[#23538] (issue: {issue}23527[#23527]) +* Avoid stack overflow in multi-search {pull}23527[#23527] (issue: {issue}23523[#23523]) +* Fix query_string_query to transform "foo:*" in an exists query on the field name {pull}23433[#23433] (issue: {issue}23356[#23356]) +* Factor out filling of TopDocs in SearchPhaseController {pull}23380[#23380] (issues: {issue}19356[#19356], {issue}23357[#23357]) +* Replace blocking calls in ExpandCollapseSearchResponseListener by asynchronous requests {pull}23053[#23053] (issue: {issue}23048[#23048]) +* Ensure fixed serialization order of InnerHitBuilder {pull}22820[#22820] (issue: {issue}22808[#22808]) +* Improve concurrency of ShardCoreKeyMap. {pull}22316[#22316] +* Make `-0` compare less than `+0` consistently. {pull}22173[#22173] (issue: {issue}22167[#22167]) +* Fix boost_mode propagation when the function score query builder is rewritten {pull}22172[#22172] (issue: {issue}22138[#22138]) +* FiltersAggregationBuilder: rewriting filter queries, the same way as in FilterAggregationBuilder {pull}22076[#22076] +* Fix cross_fields type on multi_match query with synonyms {pull}21638[#21638] (issue: {issue}21633[#21633]) +* Fix match_phrase_prefix on boosted fields {pull}21623[#21623] (issue: {issue}21613[#21613]) +* Respect default search timeout {pull}21599[#21599] (issues: {issue}12211[#12211], {issue}21595[#21595]) +* Remove LateParsingQuery to prevent timestamp access after context is frozen {pull}21328[#21328] (issue: {issue}21295[#21295]) +* Make range queries round up upper bounds again. 
{pull}20582[#20582] (issues: {issue}20579[#20579], {issue}8889[#8889]) +* Throw error when trying to fetch fields from source and source is disabled {pull}20424[#20424] (issues: {issue}20093[#20093], {issue}20408[#20408]) + +Search Templates:: +* No longer add illegal content type option to stored search templates {pull}24251[#24251] (issue: {issue}24227[#24227]) +* SearchTemplateRequest to implement CompositeIndicesRequest {pull}21865[#21865] (issue: {issue}21747[#21747]) + +Settings:: +* Do not set path.data in environment if not set {pull}24132[#24132] (issue: {issue}24099[#24099]) +* Correct handling of default and array settings {pull}24074[#24074] (issues: {issue}23981[#23981], {issue}24052[#24052]) +* Fix merge scheduler config settings {pull}23391[#23391] +* Settings: Fix keystore cli prompting for yes/no to handle console returning null {pull}23320[#23320] +* Expose `search.highlight.term_vector_multi_value` as a node level setting {pull}22999[#22999] +* NPE when no setting name passed to elasticsearch-keystore {pull}22609[#22609] +* Handle spaces in `action.auto_create_index` gracefully {pull}21790[#21790] (issue: {issue}21449[#21449]) +* Fix settings diff generation for affix and group settings {pull}21788[#21788] +* Don't reset non-dynamic settings unless explicitly requested {pull}21646[#21646] (issue: {issue}21593[#21593]) +* Fix Setting.timeValue() method {pull}20696[#20696] (issue: {issue}20662[#20662]) +* Add a hard limit for `index.number_of_shard` {pull}20682[#20682] +* Include complex settings in settings requests {pull}20622[#20622] + +Snapshot/Restore:: +* Fixes maintaining the shards a snapshot is waiting on {pull}24289[#24289] +* Fixes snapshot status on failed snapshots {pull}23833[#23833] (issue: {issue}23716[#23716]) +* Fixes snapshot deletion handling on in-progress snapshot failure {pull}23703[#23703] (issue: {issue}23663[#23663]) +* Prioritize listing index-N blobs over index.latest in reading snapshots {pull}23333[#23333] +* Gracefully handles pre 2.x compressed snapshots {pull}22267[#22267] +* URLRepository should throw NoSuchFileException to correctly adhere to readBlob contract {pull}22069[#22069] (issue: {issue}22004[#22004]) +* Fixes shard level snapshot metadata loading when index-N file is missing {pull}21813[#21813] +* Ensures cleanup of temporary index-* generational blobs during snapshotting {pull}21469[#21469] (issue: {issue}21462[#21462]) +* Fixes get snapshot duplicates when asking for _all {pull}21340[#21340] (issue: {issue}21335[#21335]) + +Stats:: +* Avoid overflow when computing total FS stats {pull}23641[#23641] +* Handle existence of cgroup version 2 hierarchy {pull}23493[#23493] (issue: {issue}23486[#23486]) +* Handle long overflow when adding paths' totals {pull}23293[#23293] (issue: {issue}23093[#23093]) +* Fix control group pattern {pull}23219[#23219] (issue: {issue}23218[#23218]) +* Fix total disk bytes returning negative value {pull}23093[#23093] +* Implement stats for geo_point and geo_shape field {pull}22391[#22391] (issue: {issue}22384[#22384]) +* Use reader for doc stats {pull}22317[#22317] (issue: {issue}22285[#22285]) +* Avoid NPE in NodeService#stats if HTTP is disabled {pull}22060[#22060] (issue: {issue}22058[#22058]) +* Add support for "include_segment_file_sizes" in indices stats REST handler {pull}21879[#21879] (issue: {issue}21878[#21878]) +* Remove output_uuid parameter from cluster stats {pull}21020[#21020] (issue: {issue}20722[#20722]) +* Fix FieldStats deserialization of `ip` field {pull}20522[#20522] (issue: 
{issue}20516[#20516]) + +Task Manager:: +* Task Management: Make TaskInfo parsing forwards compatible {pull}24073[#24073] (issue: {issue}23250[#23250]) +* Fix hanging cancelling task with no children {pull}22796[#22796] +* Fix broken TaskInfo.toString() {pull}22698[#22698] (issue: {issue}22387[#22387]) +* Task cancellation command should wait for all child nodes to receive cancellation request before returning {pull}21397[#21397] (issue: {issue}21126[#21126]) + +Term Vectors:: +* Fix _termvectors with preference to not hit NPE {pull}21959[#21959] +* Return correct term statistics when a field is not found in a shard {pull}21922[#21922] (issue: {issue}21906[#21906]) + +Tribe Node:: +* Add socket permissions for tribe nodes {pull}21546[#21546] (issues: {issue}16392[#16392], {issue}21122[#21122]) + + + +[[regression-6.0.0-alpha1-5x]] +[float] +=== Regressions + +Bulk:: +* Fix _bulk response when it can't create an index {pull}24048[#24048] (issues: {issue}22488[#22488], {issue}24028[#24028]) + +Core:: +* Source filtering: only accept array items if the previous include pattern matches {pull}22593[#22593] (issue: {issue}22557[#22557]) + +Highlighting:: +* Handle SynonymQuery extraction for the FastVectorHighlighter {pull}20829[#20829] (issue: {issue}20781[#20781]) + +Logging:: +* Restores the original default format of search slow log {pull}21770[#21770] (issue: {issue}21711[#21711]) + +Plugin Discovery EC2:: +* Fix ec2 discovery when used with IAM profiles. {pull}21042[#21042] (issue: {issue}21039[#21039]) + +Plugin Repository S3:: +* Fix s3 repository when used with IAM profiles {pull}21058[#21058] (issue: {issue}21048[#21048]) + +Plugins:: +* Plugins: Add back user agent when downloading plugins {pull}20872[#20872] + +Search:: +* Handle specialized term queries in MappedFieldType.extractTerm(Query) {pull}21889[#21889] (issue: {issue}21882[#21882]) + + + +[[upgrade-6.0.0-alpha1-5x]] +[float] +=== Upgrades + +Aggregations:: +* Upgrade HDRHistogram to 2.1.9 {pull}23254[#23254] (issue: {issue}23239[#23239]) + +Core:: +* Upgrade to Lucene 6.5.0 {pull}23750[#23750] +* Upgrade from JNA 4.2.2 to JNA 4.4.0 {pull}23636[#23636] +* Upgrade to lucene-6.5.0-snapshot-d00c5ca {pull}23385[#23385] +* Upgrade to lucene-6.5.0-snapshot-f919485. {pull}23087[#23087] +* Upgrade to Lucene 6.4.0 {pull}22724[#22724] +* Update Jackson to 2.8.6 {pull}22596[#22596] (issue: {issue}22266[#22266]) +* Upgrade to lucene-6.4.0-snapshot-084f7a0. {pull}22413[#22413] +* Upgrade to lucene-6.4.0-snapshot-ec38570 {pull}21853[#21853] +* Upgrade to lucene-6.3.0. {pull}21464[#21464] + +Dates:: +* Update Joda Time to version 2.9.5 {pull}21468[#21468] (issues: {issue}20911[#20911], {issue}332[#332], {issue}373[#373], {issue}378[#378], {issue}379[#379], {issue}386[#386], {issue}394[#394], {issue}396[#396], {issue}397[#397], {issue}404[#404], {issue}69[#69]) + +Internal:: +* Upgrade to Lucene 6.4.1. 
{pull}22978[#22978] + +Logging:: +* Upgrade to Log4j 2.8.2 {pull}23995[#23995] +* Upgrade Log4j 2 to version 2.7 {pull}20805[#20805] (issue: {issue}20304[#20304]) + +Network:: +* Upgrade Netty to 4.1.10.Final {pull}24414[#24414] +* Upgrade to Netty 4.1.9 {pull}23540[#23540] (issues: {issue}23172[#23172], {issue}6308[#6308], {issue}6374[#6374]) +* Upgrade to Netty 4.1.8 {pull}23055[#23055] +* Upgrade to Netty 4.1.7 {pull}22587[#22587] +* Upgrade to Netty 4.1.6 {pull}21051[#21051] + +Plugin Repository Azure:: +* Update to Azure Storage 5.0.0 {pull}23517[#23517] (issue: {issue}23448[#23448]) + diff --git a/docs/reference/release-notes/6.0.0-alpha1.asciidoc b/docs/reference/release-notes/6.0.0-alpha1.asciidoc new file mode 100644 index 00000000000..3af5dc49df8 --- /dev/null +++ b/docs/reference/release-notes/6.0.0-alpha1.asciidoc @@ -0,0 +1,310 @@ +[[release-notes-6.0.0-alpha1]] +== 6.0.0-alpha1 Release Notes + +The changes listed below have been released for the first time in Elasticsearch 6.0.0-alpha1. Changes in this release which were first released in the 5.x series are listed in <<release-notes-6.0.0-alpha1-5x>>. + + +Also see <<breaking-changes-6.0>>. + +[[breaking-6.0.0-alpha1]] +[float] +=== Breaking changes + +Allocation:: +* Remove `cluster.routing.allocation.snapshot.relocation_enabled` setting {pull}20994[#20994] + +Analysis:: +* Removing query-string parameters in `_analyze` API {pull}20704[#20704] (issue: {issue}20246[#20246]) + +CAT API:: +* Write -1 on unbounded queue in cat thread pool {pull}21342[#21342] (issue: {issue}21187[#21187]) + +CRUD:: +* Disallow `VersionType.FORCE` for GetRequest {pull}21079[#21079] (issue: {issue}20995[#20995]) +* Disallow `VersionType.FORCE` versioning for 6.x indices {pull}20995[#20995] (issue: {issue}20377[#20377]) + +Cluster:: +* No longer allow cluster name in data path {pull}20433[#20433] (issue: {issue}20391[#20391]) + +Core:: +* Make boolean conversion strict {pull}22200[#22200] +* Remove the `default` store type. {pull}21616[#21616] +* Remove store throttling. 
{pull}21573[#21573] + +Geo:: +* Remove deprecated geo search features {pull}22876[#22876] +* Reduce GeoDistance Insanity {pull}19846[#19846] + +Index APIs:: +* Remove support for controversial `ignore_unavailable` and `allow_no_indices` from indices exists api {pull}20712[#20712] + +Index Templates:: +* Allows multiple patterns to be specified for index templates {pull}21009[#21009] (issue: {issue}20690[#20690]) + +Java API:: +* Enforce Content-Type requirement on the rest layer and remove deprecated methods {pull}23146[#23146] (issue: {issue}19388[#19388]) + +Mapping:: +* Disallow `include_in_all` for 6.0+ indices {pull}22970[#22970] (issue: {issue}22923[#22923]) +* Disable _all by default, disallow configuring _all on 6.0+ indices {pull}22144[#22144] (issues: {issue}19784[#19784], {issue}20925[#20925], {issue}21341[#21341]) +* Throw an exception on unrecognized "match_mapping_type" {pull}22090[#22090] (issue: {issue}17285[#17285]) + +NOT CLASSIFIED:: +* Add note to docs on duplicate keys in config {pull}24022[#24022] (issue: {issue}24006[#24006]) + +Network:: +* Remove blocking TCP clients and servers {pull}22639[#22639] +* Remove `modules/transport_netty_3` in favor of `netty_4` {pull}21590[#21590] +* Remove LocalTransport in favor of MockTcpTransport {pull}20695[#20695] + +Packaging:: +* Remove customization of ES_USER and ES_GROUP {pull}23989[#23989] (issue: {issue}23848[#23848]) + +Percolator:: +* Remove deprecated percolate and mpercolate apis {pull}22331[#22331] + +Plugin Delete By Query:: +* Require explicit query in _delete_by_query API {pull}23632[#23632] (issue: {issue}23629[#23629]) + +Plugin Discovery EC2:: +* Ec2 Discovery: Cleanup deprecated settings {pull}24150[#24150] +* Discovery EC2: Remove region setting {pull}23991[#23991] (issue: {issue}22758[#22758]) +* AWS Plugins: Remove signer type setting {pull}23984[#23984] (issue: {issue}22599[#22599]) + +Plugin Lang JS:: +* Remove lang-python and lang-javascript {pull}20734[#20734] (issue: {issue}20698[#20698]) + +Plugin Mapper Attachment:: +* Remove mapper attachments plugin {pull}20416[#20416] (issue: {issue}18837[#18837]) + +Plugin Repository Azure:: +* Remove global `repositories.azure` settings {pull}23262[#23262] (issues: {issue}22800[#22800], {issue}22856[#22856]) +* Remove auto creation of container for azure repository {pull}22858[#22858] (issue: {issue}22857[#22857]) + +Plugin Repository S3:: +* S3 Repository: Cleanup deprecated settings {pull}24097[#24097] +* S3 Repository: Remove region setting {pull}22853[#22853] (issue: {issue}22758[#22758]) +* S3 Repository: Remove bucket auto create {pull}22846[#22846] (issue: {issue}22761[#22761]) +* S3 Repository: Remove env var and sysprop credentials support {pull}22842[#22842] + +Query DSL:: +* Remove deprecated `minimum_number_should_match` in BoolQueryBuilder {pull}22416[#22416] +* Remove support for empty queries {pull}22092[#22092] (issue: {issue}17624[#17624]) +* Remove deprecated query names: in, geo_bbox, mlt, fuzzy_match and match_fuzzy {pull}21852[#21852] +* The `terms` query should always map to a Lucene `TermsQuery`. 
{pull}21786[#21786] +* Be strict when parsing values searching for booleans {pull}21555[#21555] (issue: {issue}21545[#21545]) +* Remove collect payloads parameter {pull}20385[#20385] + +REST:: +* Remove ldjson support and document ndjson for bulk/msearch {pull}23049[#23049] (issue: {issue}23025[#23025]) +* Enable strict duplicate checks for all XContent types {pull}22225[#22225] (issues: {issue}19614[#19614], {issue}22073[#22073]) +* Enable strict duplicate checks for JSON content {pull}22073[#22073] (issue: {issue}19614[#19614]) +* Remove lenient stats parsing {pull}21417[#21417] (issues: {issue}20722[#20722], {issue}21410[#21410]) +* Remove allow unquoted JSON {pull}20388[#20388] (issues: {issue}17674[#17674], {issue}17801[#17801]) +* Remove FORCE version_type {pull}20377[#20377] (issue: {issue}19769[#19769]) + +Scripting:: +* Make dates be ReadableDateTimes in scripts {pull}22948[#22948] (issue: {issue}22875[#22875]) +* Remove groovy scripting language {pull}21607[#21607] + +Search:: +* ProfileResult and CollectorResult should print machine readable timing information {pull}22561[#22561] +* Remove indices query {pull}21837[#21837] (issue: {issue}17710[#17710]) +* Remove ignored type parameter in search_shards api {pull}21688[#21688] + +Sequence IDs:: +* Change certain replica failures not to fail the replica shard {pull}22874[#22874] (issue: {issue}10708[#10708]) + +Shadow Replicas:: +* Remove shadow replicas {pull}23906[#23906] (issue: {issue}22024[#22024]) + + + +[[breaking-java-6.0.0-alpha1]] +[float] +=== Breaking Java changes + +Network:: +* Simplify TransportAddress {pull}20798[#20798] + + + +[[deprecation-6.0.0-alpha1]] +[float] +=== Deprecations + +Index Templates:: +* Restore deprecation warning for invalid match_mapping_type values {pull}22304[#22304] + +Internal:: +* Deprecate XContentType auto detection methods in XContentFactory {pull}22181[#22181] (issue: {issue}19388[#19388]) + + + +[[feature-6.0.0-alpha1]] +[float] +=== New features + +Core:: +* Enable index-time sorting {pull}24055[#24055] (issue: {issue}6720[#6720]) +* Add new ip_range field type {pull}24433[#24433] + + + +[[enhancement-6.0.0-alpha1]] +[float] +=== Enhancements + +Aggregations:: +* Agg builder accessibility fixes {pull}24323[#24323] +* Remove support for the include/pattern syntax. 
{pull}23141[#23141] (issue: {issue}22933[#22933]) +* Promote longs to doubles when a terms agg mixes decimal and non-decimal numbers {pull}22449[#22449] (issue: {issue}22232[#22232]) + +Analysis:: +* Match- and MultiMatchQueryBuilder should only allow setting analyzer on string values {pull}23684[#23684] (issue: {issue}21665[#21665]) + +Bulk:: +* Simplify bulk request execution {pull}20109[#20109] + +CRUD:: +* Added validation for upsert request {pull}24282[#24282] (issue: {issue}16671[#16671]) + +Cluster:: +* Separate publishing from applying cluster states {pull}24236[#24236] +* Adds cluster state size to /_cluster/state response {pull}23440[#23440] (issue: {issue}3415[#3415]) + +Core:: +* Remove connect SocketPermissions from core {pull}22797[#22797] +* Add repository-url module and move URLRepository {pull}22752[#22752] (issue: {issue}22116[#22116]) +* Remove accept SocketPermissions from core {pull}22622[#22622] (issue: {issue}22116[#22116]) +* Move IfConfig.logIfNecessary call into bootstrap {pull}22455[#22455] (issue: {issue}22116[#22116]) +* Remove artificial default processors limit {pull}20874[#20874] (issue: {issue}20828[#20828]) +* Simplify write failure handling {pull}19105[#19105] (issue: {issue}20109[#20109]) + +Engine:: +* Fill missing sequence IDs up to max sequence ID when recovering from store {pull}24238[#24238] (issue: {issue}10708[#10708]) +* Use sequence numbers to identify out of order delivery in replicas & recovery {pull}24060[#24060] (issue: {issue}10708[#10708]) +* Add replica ops with version conflict to translog {pull}22626[#22626] +* Clarify global checkpoint recovery {pull}21934[#21934] (issue: {issue}21254[#21254]) + +Internal:: +* Try to convince the JVM not to lose stacktraces {pull}24426[#24426] (issue: {issue}24376[#24376]) +* Make document write requests immutable {pull}23038[#23038] + +Java High Level REST Client:: +* Add info method to High Level Rest client {pull}23350[#23350] +* Add support for named xcontent parsers to high level REST client {pull}23328[#23328] +* Add BulkRequest support to High Level Rest client {pull}23312[#23312] +* Add UpdateRequest support to High Level Rest client {pull}23266[#23266] +* Add delete API to the High Level Rest Client {pull}23187[#23187] +* Add Index API to High Level Rest Client {pull}23040[#23040] +* Add get/exists method to RestHighLevelClient {pull}22706[#22706] +* Add fromxcontent methods to delete response {pull}22680[#22680] (issue: {issue}22229[#22229]) +* Add parsing from xContent to SearchResponse {pull}22533[#22533] +* Add REST high level client gradle submodule and first simple method {pull}22371[#22371] + +Java REST Client:: +* Wrap rest httpclient with doPrivileged blocks {pull}22603[#22603] (issue: {issue}22116[#22116]) + +Mapping:: +* Date detection should not rely on a hardcoded set of characters. 
{pull}22171[#22171] (issue: {issue}1694[#1694]) + +Network:: +* Isolate SocketPermissions to Netty {pull}23057[#23057] +* Wrap netty accept/connect ops with doPrivileged {pull}22572[#22572] (issue: {issue}22116[#22116]) +* Replace Socket, ServerSocket, and HttpServer usages in tests with mocksocket versions {pull}22287[#22287] (issue: {issue}22116[#22116]) + +Plugin Discovery EC2:: +* Read ec2 discovery address from aws instance tags {pull}22743[#22743] (issue: {issue}22566[#22566]) + +Plugin Repository HDFS:: +* Add doPrivilege blocks for socket connect ops in repository-hdfs {pull}22793[#22793] (issue: {issue}22116[#22116]) + +Plugins:: +* Add doPrivilege blocks for socket connect operations in plugins {pull}22534[#22534] (issue: {issue}22116[#22116]) + +Recovery:: +* Peer Recovery: remove maxUnsafeAutoIdTimestamp hand off {pull}24243[#24243] (issue: {issue}24149[#24149]) +* Introduce sequence-number-based recovery {pull}22484[#22484] (issue: {issue}10708[#10708]) + +Search:: +* Add parsing from xContent to Suggest {pull}22903[#22903] +* Add parsing from xContent to ShardSearchFailure {pull}22699[#22699] + +Sequence IDs:: +* Add primary term to doc write response {pull}24171[#24171] (issue: {issue}10708[#10708]) +* Preserve multiple translog generations {pull}24015[#24015] (issue: {issue}10708[#10708]) +* Introduce translog generation rolling {pull}23606[#23606] (issue: {issue}10708[#10708]) +* Replicate write failures {pull}23314[#23314] +* Introduce sequence-number-aware translog {pull}22822[#22822] (issue: {issue}10708[#10708]) +* Introduce translog no-op {pull}22291[#22291] (issue: {issue}10708[#10708]) +* Tighten sequence numbers recovery {pull}22212[#22212] (issue: {issue}10708[#10708]) +* Add BWC layer to seq no infra and enable BWC tests {pull}22185[#22185] (issue: {issue}21670[#21670]) +* Add internal _primary_term doc values field, fix _seq_no indexing {pull}21637[#21637] (issues: {issue}10708[#10708], {issue}21480[#21480]) +* Add global checkpoint to translog checkpoints {pull}21254[#21254] +* Sequence numbers commit data for Lucene uses Iterable interface {pull}20793[#20793] (issue: {issue}10708[#10708]) +* Simplify GlobalCheckpointService and properly hook it for cluster state updates {pull}20720[#20720] + +Stats:: +* Expose disk usage estimates in nodes stats {pull}22081[#22081] (issue: {issue}8686[#8686]) + +Store:: +* Remove support for lucene versions without checksums {pull}24021[#24021] + +Suggesters:: +* Remove deprecated _suggest endpoint {pull}22203[#22203] (issue: {issue}20305[#20305]) + +Task Manager:: +* Add descriptions to bulk tasks {pull}22059[#22059] (issue: {issue}21768[#21768]) + + + +[[bug-6.0.0-alpha1]] +[float] +=== Bug fixes + +Ingest:: +* Remove support for Visio and potm files {pull}22079[#22079] (issue: {issue}22077[#22077]) + +Inner Hits:: +* If size / offset are out of bounds just do a plain count {pull}20556[#20556] (issue: {issue}20501[#20501]) + +Internal:: +* Fix handling of document failure exception in InternalEngine {pull}22718[#22718] + +Plugin Ingest Attachment:: +* Add missing mime4j library {pull}22764[#22764] (issue: {issue}22077[#22077]) + +Plugin Repository S3:: +* Wrap getCredentials() in a doPrivileged() block {pull}23297[#23297] (issues: {issue}22534[#22534], {issue}23271[#23271]) + +Sequence IDs:: +* Avoid losing ops in file-based recovery {pull}22945[#22945] (issue: {issue}22484[#22484]) + +Snapshot/Restore:: +* Keep snapshot restore state and routing table in sync {pull}20836[#20836] (issue: {issue}19774[#19774]) + +Translog:: 
+* Fix Translog.Delete serialization for sequence numbers {pull}22543[#22543] + + + +[[regression-6.0.0-alpha1]] +[float] +=== Regressions + +Bulk:: +* Only re-parse operation if a mapping update was needed {pull}23832[#23832] (issue: {issue}23665[#23665]) + + + +[[upgrade-6.0.0-alpha1]] +[float] +=== Upgrades + +Core:: +* Upgrade to a Lucene 7 snapshot {pull}24089[#24089] (issues: {issue}23966[#23966], {issue}24086[#24086], {issue}24087[#24087], {issue}24088[#24088]) + +Plugin Ingest Attachment:: +* Update to Tika 1.14 {pull}21591[#21591] (issue: {issue}20390[#20390]) + diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index 1b118a419ed..900426bee9b 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -22,6 +22,7 @@ The structure looks like this: } } -------------------------------------------------- +// NOTCONSOLE If `inner_hits` is defined on a query that supports it then each search hit will contain an `inner_hits` json object with the following structure: @@ -52,6 +53,7 @@ If `inner_hits` is defined on a query that supports it then each search hit will ... ] -------------------------------------------------- +// NOTCONSOLE ==== Options @@ -80,22 +82,50 @@ Inner hits also supports the following per document features: The nested `inner_hits` can be used to include nested inner objects as inner hits to a search hit. -The example below assumes that there is a nested object field defined with the name `comments`: - [source,js] -------------------------------------------------- +PUT test { - "query" : { - "nested" : { - "path" : "comments", - "query" : { - "match" : {"comments.message" : "[actual query]"} - }, - "inner_hits" : {} <1> + "mappings": { + "doc": { + "properties": { + "comments": { + "type": "nested" } + } } + } +} + +PUT test/doc/1?refresh +{ + "title": "Test title", + "comments": [ + { + "author": "kimchy", + "text": "comment text" + }, + { + "author": "nik9000", + "text": "words words words" + } + ] +} + +POST test/_search +{ + "query": { + "nested": { + "path": "comments", + "query": { + "match": {"comments.text" : "words"} + }, + "inner_hits": {} <1> + } + } } -------------------------------------------------- +// CONSOLE <1> The inner hit definition in the nested query. No other options need to be defined. @@ -103,35 +133,46 @@ An example of a response snippet that could be generated from the above search r [source,js] -------------------------------------------------- -... -"hits": { - ... - "hits": [ - { - "_index": "my-index", - "_type": "question", +{ + ..., + "hits": { + "total": 1, + "max_score": 0.9651416, + "hits": [ + { + "_index": "test", + "_type": "doc", "_id": "1", + "_score": 0.9651416, "_source": ..., "inner_hits": { - "comments": { <1> - "hits": { - "total": ..., - "hits": [ - { - "_nested": { - "field": "comments", - "offset": 2 - }, - "_source": ... - }, - ... - ] - } - } + "comments": { <1> + "hits": { + "total": 1, + "max_score": 0.9651416, + "hits": [ + { + "_nested": { + "field": "comments", + "offset": 1 + }, + "_score": 0.9651416, + "_source": { + "author": "nik9000", + "text": "words words words" + } + } + ] + } + } } - }, - ... 
+ } + ] + } +} -------------------------------------------------- +// TESTRESPONSE[s/"_source": \.\.\./"_source": $body.hits.hits.0._source/] +// TESTRESPONSE[s/\.\.\./"timed_out": false, "took": $body.took, "_shards": $body._shards/] <1> The name used in the inner hit definition in the search request. A custom key can be used via the `name` option. @@ -156,46 +197,111 @@ its `_source` field. To include the source of just the nested document, the sour the relevant bit for the nested document is included as source in the inner hit. Doing this for each matching nested document has an impact on the time it takes to execute the entire search request, especially when `size` and the inner hits' `size` are set higher than the default. To avoid the relative expensive source extraction for nested inner hits, one can disable -including the source and solely rely on stored fields. - -Enabled stored field for fields under the nested object field in your mapping: +including the source and solely rely on stored fields. Like this: [source,js] -------------------------------------------------- +PUT test { - "properties": { - "comment": { - "type": "comments", - "properties" : { - "message" : { - "type" : "text", - "store" : true + "mappings": { + "doc": { + "properties": { + "comments": { + "type": "nested", + "properties": { + "text": { + "type": "text", + "store": true } } } + } } + } +} + +PUT test/doc/1?refresh +{ + "title": "Test title", + "comments": [ + { + "author": "kimchy", + "text": "comment text" + }, + { + "author": "nik9000", + "text": "words words words" + } + ] +} + +POST test/_search +{ + "query": { + "nested": { + "path": "comments", + "query": { + "match": {"comments.text" : "words"} + }, + "inner_hits": { + "_source" : false, + "stored_fields" : ["comments.text"] + } + } + } } -------------------------------------------------- +// CONSOLE -Disable including source and include specific stored fields in the inner hits definition: +//// + +Response not included in text but tested for completeness sake. [source,js] -------------------------------------------------- { - "query" : { - "nested" : { - "path" : "comments", - "query" : { - "match" : {"comments.message" : "[actual query]"} - }, - "inner_hits" : { - "_source" : false, - "stored_fields" : ["comments.text"] + ..., + "hits": { + "total": 1, + "max_score": 0.9651416, + "hits": [ + { + "_index": "test", + "_type": "doc", + "_id": "1", + "_score": 0.9651416, + "_source": ..., + "inner_hits": { + "comments": { <1> + "hits": { + "total": 1, + "max_score": 0.9651416, + "hits": [ + { + "_nested": { + "field": "comments", + "offset": 1 + }, + "_score": 0.9651416, + "fields": { + "comments.text": [ + "words words words" + ] + } + } + ] } + } } - } + } + ] + } } -------------------------------------------------- +// TESTRESPONSE[s/"_source": \.\.\./"_source": $body.hits.hits.0._source/] +// TESTRESPONSE[s/\.\.\./"timed_out": false, "took": $body.took, "_shards": $body._shards/] + +//// [[hierarchical-nested-inner-hits]] ==== Hierarchical levels of nested object fields and inner hits. @@ -206,16 +312,113 @@ with the root hits then the following path can be defined: [source,js] -------------------------------------------------- +PUT test { - "query" : { - "nested" : { - "path" : "comments.votes", - "query" : { ... 
}, - "inner_hits" : {} + "mappings": { + "doc": { + "properties": { + "comments": { + "type": "nested", + "properties": { + "message": { + "type": "text", + "store": true + }, + "votes": { + "type": "nested" + } + } + } } } + } +} + +PUT test/doc/1?refresh +{ + "title": "Test title", + "comments": [ + { + "author": "kimchy", + "text": "comment text", + "votes": [] + }, + { + "author": "nik9000", + "text": "words words words", + "votes": [ + {"value": 1 , "voter": "kimchy"}, + {"value": -1, "voter": "other"} + ] + } + ] +} + +POST test/_search +{ + "query": { + "nested": { + "path": "comments.votes", + "query": { + "match": { + "comments.votes.voter": "kimchy" + } + }, + "inner_hits" : {} + } + } } -------------------------------------------------- +// CONSOLE + +Which would look like: + +[source,js] +-------------------------------------------------- +{ + ..., + "hits": { + "total": 1, + "max_score": 0.6931472, + "hits": [ + { + "_index": "test", + "_type": "doc", + "_id": "1", + "_score": 0.6931472, + "_source": ..., + "inner_hits": { + "comments.votes": { <1> + "hits": { + "total": 1, + "max_score": 0.6931472, + "hits": [ + { + "_nested": { + "field": "comments", + "offset": 1, + "_nested": { + "field": "votes", + "offset": 0 + } + }, + "_score": 0.6931472, + "_source": { + "value": 1, + "voter": "kimchy" + } + } + ] + } + } + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_source": \.\.\./"_source": $body.hits.hits.0._source/] +// TESTRESPONSE[s/\.\.\./"timed_out": false, "took": $body.took, "_shards": $body._shards/] This indirect referencing is only supported for nested inner hits. @@ -224,22 +427,49 @@ This indirect referencing is only supported for nested inner hits. The parent/child `inner_hits` can be used to include parent or child -The examples below assumes that there is a `_parent` field mapping in the `comment` type: - [source,js] -------------------------------------------------- +PUT test { - "query" : { - "has_child" : { - "type" : "comment", - "query" : { - "match" : {"message" : "[actual query]"} - }, - "inner_hits" : {} <1> - } + "settings": { + "mapping.single_type": false + }, + "mappings": { + "my_parent": {}, + "my_child": { + "_parent": { + "type": "my_parent" + } } + } +} + +PUT test/my_parent/1?refresh +{ + "test": "test" +} + +PUT test/my_child/1?parent=1&refresh +{ + "test": "test" +} + +POST test/_search +{ + "query": { + "has_child": { + "type": "my_child", + "query": { + "match": { + "test": "test" + } + }, + "inner_hits": {} <1> + } + } } -------------------------------------------------- +// CONSOLE <1> The inner hit definition like in the nested example. @@ -247,30 +477,42 @@ An example of a response snippet that could be generated from the above search r [source,js] -------------------------------------------------- -... -"hits": { - ... - "hits": [ - { - "_index": "my-index", - "_type": "question", +{ + ..., + "hits": { + "total": 1, + "max_score": 1.0, + "hits": [ + { + "_index": "test", + "_type": "my_parent", "_id": "1", + "_score": 1.0, "_source": ..., "inner_hits": { - "comment": { - "hits": { - "total": ..., - "hits": [ - { - "_type": "comment", - "_id": "5", - "_source": ... - }, - ... - ] - } - } + "my_child": { + "hits": { + "total": 1, + "max_score": 0.18232156, + "hits": [ + { + "_type": "my_child", + "_id": "1", + "_score": 0.18232156, + "_routing": "1", + "_parent": "1", + "_source": { + "test": "test" + } + } + ] + } + } } - }, - ... 
--------------------------------------------------- \ No newline at end of file + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"_source": \.\.\./"_source": $body.hits.hits.0._source/] +// TESTRESPONSE[s/\.\.\./"timed_out": false, "took": $body.took, "_shards": $body._shards/] diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc index 1515a182d42..b20117bb75d 100644 --- a/docs/reference/search/search-shards.asciidoc +++ b/docs/reference/search/search-shards.asciidoc @@ -4,7 +4,7 @@ The search shards api returns the indices and shards that a search request would be executed against. This can give useful feedback for working out issues or planning optimizations with routing and shard preferences. When filtered aliases -are used, the filter is returned as part of the `indices` section. +are used, the filter is returned as part of the `indices` section [5.1.0] Added in 5.1.0. The `index` may be a single value, or comma-separated. @@ -165,4 +165,4 @@ routing values have been specified. `local`:: A boolean value whether to read the cluster state locally in order to determine where shards are allocated instead of using the Master node's - cluster state. + cluster state. \ No newline at end of file diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 6bf17a4ac39..3f21a18de72 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -130,7 +130,7 @@ PUT music/song/1?refresh // TEST[continued] You can use the following shorthand form. Note that you can not specify -a weight with suggestion(s). +a weight with suggestion(s) in the shorthand form. 
[source,js] -------------------------------------------------- diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 2b0ce48152e..7218f6e6b23 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -227,13 +227,13 @@ Response: "index": "twitter", "shard": 0, "valid": true, - "explanation": "+MatchNoDocsQuery(\"empty BooleanQuery\") #ConstantScore(MatchNoDocsQuery(\"empty BooleanQuery\"))" + "explanation": "user:kimchy~2" }, { "index": "twitter", "shard": 1, "valid": true, - "explanation": "+MatchNoDocsQuery(\"empty BooleanQuery\") #ConstantScore(MatchNoDocsQuery(\"empty BooleanQuery\"))" + "explanation": "user:kimchy~2" }, { "index": "twitter", @@ -251,7 +251,7 @@ Response: "index": "twitter", "shard": 4, "valid": true, - "explanation": "+MatchNoDocsQuery(\"empty BooleanQuery\") #ConstantScore(MatchNoDocsQuery(\"empty BooleanQuery\"))" + "explanation": "user:kimchy~2" } ] } diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 31b41261b3a..85f609d7757 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -33,6 +33,10 @@ dependencyLicenses { mapping from: /asm-.*/, to: 'asm' } +test { + jvmArg '-XX:-OmitStackTraceInFastThrow' +} + integTestCluster { setting 'script.max_compilations_per_minute', '1000' } @@ -146,5 +150,3 @@ task regen { } } } - - diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java index 91bbe1c1a9b..3d509db6561 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java @@ -101,7 +101,11 @@ class Netty4InternalESLogger extends AbstractInternalLogger { @Override public void info(String msg) { - logger.info(msg); + if (!("Your platform does not provide complete low-level API for accessing direct buffers reliably. 
" + + "Unless explicitly requested, heap buffer will always be preferred to avoid potential system " + + "instability.").equals(msg)) { + logger.info(msg); + } } @Override diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java index 6bff34a667c..0240781c1aa 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java @@ -22,6 +22,7 @@ package org.elasticsearch.plugin.discovery.azure.classic; import org.apache.logging.log4j.Logger; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceImpl; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -76,11 +77,12 @@ public class AzureDiscoveryPlugin extends Plugin implements DiscoveryPlugin { public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider) { + ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, + AllocationService allocationService) { // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider return Collections.singletonMap(AZURE, () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, hostsProvider)); + clusterSettings, hostsProvider, allocationService)); } @Override diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java index 3c80d0eda06..3280368631b 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java @@ -24,6 +24,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.SetOnce; import org.elasticsearch.SpecialPermission; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -98,11 +99,12 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider) { + ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, + AllocationService allocationService) { // this is for backcompat with pre 5.1, where users would set 
discovery.type to use ec2 hosts provider return Collections.singletonMap(EC2, () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, hostsProvider)); + clusterSettings, hostsProvider, allocationService)); } @Override diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index 44fb5c8d15e..d20b5eaef05 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -29,6 +29,7 @@ import org.elasticsearch.cloud.gce.GceInstancesServiceImpl; import org.elasticsearch.cloud.gce.GceMetadataService; import org.elasticsearch.cloud.gce.network.GceNameResolver; import org.elasticsearch.cloud.gce.util.Access; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationLogger; @@ -86,11 +87,12 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider) { + ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, + AllocationService allocationService) { // this is for backcompat with pre 5.1, where users would set discovery.type to use ec2 hosts provider return Collections.singletonMap(GCE, () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, hostsProvider)); + clusterSettings, hostsProvider, allocationService)); } @Override diff --git a/plugins/jvm-example/build.gradle b/plugins/jvm-example/build.gradle index e8a37a144a5..fb362e6fa36 100644 --- a/plugins/jvm-example/build.gradle +++ b/plugins/jvm-example/build.gradle @@ -33,7 +33,7 @@ dependencies { exampleFixture project(':test:fixtures:example-fixture') } -task exampleFixture(type: org.elasticsearch.gradle.test.Fixture) { +task exampleFixture(type: org.elasticsearch.gradle.test.AntFixture) { dependsOn project.configurations.exampleFixture executable = new File(project.javaHome, 'bin/java') args '-cp', "${ -> project.configurations.exampleFixture.asPath }", diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 82548f3410e..f17819dba40 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -60,7 +60,7 @@ dependencyLicenses { mapping from: /hadoop-.*/, to: 'hadoop' } -task hdfsFixture(type: org.elasticsearch.gradle.test.Fixture) { +task hdfsFixture(type: org.elasticsearch.gradle.test.AntFixture) { dependsOn project.configurations.hdfsFixture executable = new File(project.javaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }" diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java index 7ce5e8d3cd8..8d88b7fd074 100644 
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java @@ -29,23 +29,21 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import java.io.IOException; -import java.lang.reflect.ReflectPermission; -import java.net.SocketPermission; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; -import javax.security.auth.AuthPermission; - final class HdfsBlobStore implements BlobStore { private final Path root; private final FileContext fileContext; + private final HdfsSecurityContext securityContext; private final int bufferSize; private volatile boolean closed; HdfsBlobStore(FileContext fileContext, String path, int bufferSize) throws IOException { this.fileContext = fileContext; + this.securityContext = new HdfsSecurityContext(fileContext.getUgi()); this.bufferSize = bufferSize; this.root = execute(fileContext1 -> fileContext1.makeQualified(new Path(path))); try { @@ -107,9 +105,6 @@ final class HdfsBlobStore implements BlobStore { /** * Executes the provided operation against this store */ - // we can do FS ops with only two elevated permissions: - // 1) hadoop dynamic proxy is messy with access rules - // 2) allow hadoop to add credentials to our Subject <V> V execute(Operation<V> operation) throws IOException { SpecialPermission.check(); if (closed) { @@ -117,8 +112,12 @@ final class HdfsBlobStore implements BlobStore { } try { return AccessController.doPrivileged((PrivilegedExceptionAction<V>) - () -> operation.run(fileContext), null, new ReflectPermission("suppressAccessChecks"), - new AuthPermission("modifyPrivateCredentials"), new SocketPermission("*", "connect")); + () -> { + securityContext.ensureLogin(); + return operation.run(fileContext); + }, + null, + securityContext.getRestrictedExecutionPermissions()); } catch (PrivilegedActionException pae) { throw (IOException) pae.getException(); } diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java index 9ea53a5acf2..4e51ab23b80 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java @@ -26,6 +26,9 @@ import java.security.PrivilegedAction; import java.util.Collections; import java.util.Map; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB; +import org.apache.hadoop.security.KerberosInfo; +import org.apache.hadoop.security.SecurityUtil; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -40,6 +43,7 @@ public final class HdfsPlugin extends Plugin implements RepositoryPlugin { static { SpecialPermission.check(); AccessController.doPrivileged((PrivilegedAction<Void>) HdfsPlugin::evilHadoopInit); + AccessController.doPrivileged((PrivilegedAction<Void>) HdfsPlugin::eagerInit); } @SuppressForbidden(reason = "Needs a security hack for hadoop on windows, until HADOOP-XXXX is fixed") @@ -79,6 +83,34 @@ public final class HdfsPlugin extends Plugin implements RepositoryPlugin { return null; } + private static Void eagerInit() { + /* + * Hadoop RPC wire serialization uses 
ProtocolBuffers. All proto classes for Hadoop + * come annotated with configurations that denote information about if they support + * certain security options like Kerberos, and how to send information with the + * message to support that authentication method. SecurityUtil creates a service loader + * in a static field during its clinit. This loader provides the implementations that + * pull the security information for each proto class. The service loader sources its + * services from the current thread's context class loader, which must contain the Hadoop + * jars. Since plugins don't execute with their class loaders installed as the thread's + * context class loader, we need to install the loader briefly, allow the util to be + * initialized, then restore the old loader since we don't actually own this thread. + */ + ClassLoader oldCCL = Thread.currentThread().getContextClassLoader(); + try { + Thread.currentThread().setContextClassLoader(HdfsRepository.class.getClassLoader()); + KerberosInfo info = SecurityUtil.getKerberosInfo(ClientNamenodeProtocolPB.class, null); + // Make sure that the correct class loader was installed. + if (info == null) { + throw new RuntimeException("Could not initialize SecurityUtil: " + + "Unable to find services for [org.apache.hadoop.security.SecurityInfo]"); + } + } finally { + Thread.currentThread().setContextClassLoader(oldCCL); + } + return null; + } + @Override public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { return Collections.singletonMap("hdfs", (metadata) -> new HdfsRepository(metadata, env, namedXContentRegistry)); diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index d784e8bf093..16ed9d06a5e 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -19,29 +19,31 @@ package org.elasticsearch.repositories.hdfs; import java.io.IOException; -import java.lang.reflect.Constructor; +import java.io.UncheckedIOException; +import java.net.InetAddress; import java.net.URI; +import java.net.UnknownHostException; import java.security.AccessController; -import java.security.Principal; import java.security.PrivilegedAction; -import java.util.Collections; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; -import javax.security.auth.Subject; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.UnsupportedFileSystemException; -import org.elasticsearch.ElasticsearchGenerationException; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.logging.log4j.Logger; import org.elasticsearch.SpecialPermission; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import 
org.elasticsearch.common.unit.ByteSizeValue; @@ -51,9 +53,14 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository; public final class HdfsRepository extends BlobStoreRepository { - private final BlobPath basePath = BlobPath.cleanPath(); + private static final Logger LOGGER = Loggers.getLogger(HdfsRepository.class); + + private static final String CONF_SECURITY_PRINCIPAL = "security.principal"; + + private final Environment environment; private final ByteSizeValue chunkSize; private final boolean compress; + private final BlobPath basePath = BlobPath.cleanPath(); private HdfsBlobStore blobStore; @@ -65,6 +72,7 @@ public final class HdfsRepository extends BlobStoreRepository { NamedXContentRegistry namedXContentRegistry) throws IOException { super(metadata, environment.settings(), namedXContentRegistry); + this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); this.compress = metadata.settings().getAsBoolean("compress", false); } @@ -101,49 +109,116 @@ public final class HdfsRepository extends BlobStoreRepository { blobStore = new HdfsBlobStore(fileContext, pathSetting, bufferSize); logger.debug("Using file-system [{}] for URI [{}], path [{}]", fileContext.getDefaultFileSystem(), fileContext.getDefaultFileSystem().getUri(), pathSetting); } catch (IOException e) { - throw new ElasticsearchGenerationException(String.format(Locale.ROOT, "Cannot create HDFS repository for uri [%s]", uri), e); + throw new UncheckedIOException(String.format(Locale.ROOT, "Cannot create HDFS repository for uri [%s]", uri), e); } super.doStart(); } // create hadoop filecontext - @SuppressForbidden(reason = "lesser of two evils (the other being a bunch of JNI/classloader nightmares)") - private static FileContext createContext(URI uri, Settings repositorySettings) { - Configuration cfg = new Configuration(repositorySettings.getAsBoolean("load_defaults", true)); - cfg.setClassLoader(HdfsRepository.class.getClassLoader()); - cfg.reloadConfiguration(); + private FileContext createContext(URI uri, Settings repositorySettings) { + Configuration hadoopConfiguration = new Configuration(repositorySettings.getAsBoolean("load_defaults", true)); + hadoopConfiguration.setClassLoader(HdfsRepository.class.getClassLoader()); + hadoopConfiguration.reloadConfiguration(); Map<String, String> map = repositorySettings.getByPrefix("conf.").getAsMap(); for (Entry<String, String> entry : map.entrySet()) { - cfg.set(entry.getKey(), entry.getValue()); + hadoopConfiguration.set(entry.getKey(), entry.getValue()); } - // create a hadoop user. if we want some auth, it must be done different anyway, and tested. 
- Subject subject; - try { - Class<?> clazz = Class.forName("org.apache.hadoop.security.User"); - Constructor<?> ctor = clazz.getConstructor(String.class); - ctor.setAccessible(true); - Principal principal = (Principal) ctor.newInstance(System.getProperty("user.name")); - subject = new Subject(false, Collections.singleton(principal), Collections.emptySet(), Collections.emptySet()); - } catch (ReflectiveOperationException e) { - throw new RuntimeException(e); - } + // Create a hadoop user + UserGroupInformation ugi = login(hadoopConfiguration, repositorySettings); - // disable FS cache - cfg.setBoolean("fs.hdfs.impl.disable.cache", true); + // Disable FS cache + hadoopConfiguration.setBoolean("fs.hdfs.impl.disable.cache", true); - // create the filecontext with our user - return Subject.doAs(subject, (PrivilegedAction<FileContext>) () -> { + // Create the filecontext with our user information + // This will correctly configure the filecontext to have our UGI as it's internal user. + return ugi.doAs((PrivilegedAction<FileContext>) () -> { try { - AbstractFileSystem fs = AbstractFileSystem.get(uri, cfg); - return FileContext.getFileContext(fs, cfg); + AbstractFileSystem fs = AbstractFileSystem.get(uri, hadoopConfiguration); + return FileContext.getFileContext(fs, hadoopConfiguration); } catch (UnsupportedFileSystemException e) { - throw new RuntimeException(e); + throw new UncheckedIOException(e); } }); } + private UserGroupInformation login(Configuration hadoopConfiguration, Settings repositorySettings) { + // Validate the authentication method: + AuthenticationMethod authMethod = SecurityUtil.getAuthenticationMethod(hadoopConfiguration); + if (authMethod.equals(AuthenticationMethod.SIMPLE) == false + && authMethod.equals(AuthenticationMethod.KERBEROS) == false) { + throw new RuntimeException("Unsupported authorization mode ["+authMethod+"]"); + } + + // Check if the user added a principal to use, and that there is a keytab file provided + String kerberosPrincipal = repositorySettings.get(CONF_SECURITY_PRINCIPAL); + + // Check to see if the authentication method is compatible + if (kerberosPrincipal != null && authMethod.equals(AuthenticationMethod.SIMPLE)) { + LOGGER.warn("Hadoop authentication method is set to [SIMPLE], but a Kerberos principal is " + + "specified. Continuing with [KERBEROS] authentication."); + SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, hadoopConfiguration); + } else if (kerberosPrincipal == null && authMethod.equals(AuthenticationMethod.KERBEROS)) { + throw new RuntimeException("HDFS Repository does not support [KERBEROS] authentication without " + + "a valid Kerberos principal and keytab. Please specify a principal in the repository settings with [" + + CONF_SECURITY_PRINCIPAL + "]."); + } + + // Now we can initialize the UGI with the configuration. 
+ UserGroupInformation.setConfiguration(hadoopConfiguration); + + // Debugging + LOGGER.debug("Hadoop security enabled: [{}]", UserGroupInformation.isSecurityEnabled()); + LOGGER.debug("Using Hadoop authentication method: [{}]", SecurityUtil.getAuthenticationMethod(hadoopConfiguration)); + + // UserGroupInformation (UGI) instance is just a Hadoop specific wrapper around a Java Subject + try { + if (UserGroupInformation.isSecurityEnabled()) { + String principal = preparePrincipal(kerberosPrincipal); + String keytab = HdfsSecurityContext.locateKeytabFile(environment).toString(); + LOGGER.debug("Using kerberos principal [{}] and keytab located at [{}]", principal, keytab); + return UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab); + } + return UserGroupInformation.getCurrentUser(); + } catch (IOException e) { + throw new UncheckedIOException("Could not retrieve the current user information", e); + } + } + + // Convert principals of the format 'service/_HOST@REALM' by subbing in the local address for '_HOST'. + private static String preparePrincipal(String originalPrincipal) { + String finalPrincipal = originalPrincipal; + // Don't worry about host name resolution if they don't have the _HOST pattern in the name. + if (originalPrincipal.contains("_HOST")) { + try { + finalPrincipal = SecurityUtil.getServerPrincipal(originalPrincipal, getHostName()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + + if (originalPrincipal.equals(finalPrincipal) == false) { + LOGGER.debug("Found service principal. Converted original principal name [{}] to server principal [{}]", + originalPrincipal, finalPrincipal); + } + } + return finalPrincipal; + } + + @SuppressForbidden(reason = "InetAddress.getLocalHost(); Needed for filling in hostname for a kerberos principal name pattern.") + private static String getHostName() { + try { + /* + * This should not block since it should already be resolved via Log4J and Netty. The + * host information is cached by the JVM and the TTL for the cache entry is infinite + * when the SecurityManager is activated. + */ + return InetAddress.getLocalHost().getCanonicalHostName(); + } catch (UnknownHostException e) { + throw new RuntimeException("Could not locate host information", e); + } + } + @Override protected BlobStore blobStore() { return blobStore; diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java new file mode 100644 index 00000000000..3cd1a5a40fd --- /dev/null +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java @@ -0,0 +1,145 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.repositories.hdfs; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.lang.reflect.ReflectPermission; +import java.net.SocketPermission; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.Permission; +import java.util.Arrays; +import java.util.Locale; +import java.util.function.Supplier; +import javax.security.auth.AuthPermission; +import javax.security.auth.PrivateCredentialPermission; +import javax.security.auth.kerberos.ServicePermission; + +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.env.Environment; + +/** + * Oversees all the security specific logic for the HDFS Repository plugin. + * + * Keeps track of the current user for a given repository, as well as which + * permissions to grant the blob store restricted execution methods. + */ +class HdfsSecurityContext { + + private static final Logger LOGGER = Loggers.getLogger(HdfsSecurityContext.class); + + private static final Permission[] SIMPLE_AUTH_PERMISSIONS; + private static final Permission[] KERBEROS_AUTH_PERMISSIONS; + static { + // We can do FS ops with only a few elevated permissions: + SIMPLE_AUTH_PERMISSIONS = new Permission[]{ + new SocketPermission("*", "connect"), + // 1) hadoop dynamic proxy is messy with access rules + new ReflectPermission("suppressAccessChecks"), + // 2) allow hadoop to add credentials to our Subject + new AuthPermission("modifyPrivateCredentials") + }; + + // If Security is enabled, we need all the following elevated permissions: + KERBEROS_AUTH_PERMISSIONS = new Permission[] { + new SocketPermission("*", "connect"), + // 1) hadoop dynamic proxy is messy with access rules + new ReflectPermission("suppressAccessChecks"), + // 2) allow hadoop to add credentials to our Subject + new AuthPermission("modifyPrivateCredentials"), + // 3) allow hadoop to act as the logged in Subject + new AuthPermission("doAs"), + // 4) Listen and resolve permissions for kerberos server principals + new SocketPermission("localhost:0", "listen,resolve"), + // We add the following since hadoop requires the client to re-login when the kerberos ticket expires: + // 5) All the permissions needed for UGI to do its weird JAAS hack + new RuntimePermission("getClassLoader"), + new RuntimePermission("setContextClassLoader"), + // 6) Additional permissions for the login modules + new AuthPermission("modifyPrincipals"), + new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read"), + new PrivateCredentialPermission("javax.security.auth.kerberos.KerberosTicket * \"*\"", "read"), + new PrivateCredentialPermission("javax.security.auth.kerberos.KeyTab * \"*\"", "read") + // Included later: + // 7) allow code to initiate kerberos connections as the logged in user + // Still far and away fewer permissions than the original full plugin policy + }; + } + + /** + * Locates the keytab file in the environment and verifies that it exists. 
+ * Expects keytab file to exist at {@code $CONFIG_DIR$/repository-hdfs/krb5.keytab} + */ + static Path locateKeytabFile(Environment environment) { + Path keytabPath = environment.configFile().resolve("repository-hdfs").resolve("krb5.keytab"); + try { + if (Files.exists(keytabPath) == false) { + throw new RuntimeException("Could not locate keytab at [" + keytabPath + "]."); + } + } catch (SecurityException se) { + throw new RuntimeException("Could not locate keytab at [" + keytabPath + "]", se); + } + return keytabPath; + } + + private final UserGroupInformation ugi; + private final Permission[] restrictedExecutionPermissions; + + HdfsSecurityContext(UserGroupInformation ugi) { + this.ugi = ugi; + this.restrictedExecutionPermissions = renderPermissions(ugi); + } + + private Permission[] renderPermissions(UserGroupInformation ugi) { + Permission[] permissions; + if (ugi.isFromKeytab()) { + // KERBEROS + // Leave room to append one extra permission based on the logged in user's info. + int permlen = KERBEROS_AUTH_PERMISSIONS.length + 1; + permissions = new Permission[permlen]; + + System.arraycopy(KERBEROS_AUTH_PERMISSIONS, 0, permissions, 0, KERBEROS_AUTH_PERMISSIONS.length); + + // Append a kerberos.ServicePermission to only allow initiating kerberos connections + // as the logged in user. + permissions[permissions.length - 1] = new ServicePermission(ugi.getUserName(), "initiate"); + } else { + // SIMPLE + permissions = Arrays.copyOf(SIMPLE_AUTH_PERMISSIONS, SIMPLE_AUTH_PERMISSIONS.length); + } + return permissions; + } + + Permission[] getRestrictedExecutionPermissions() { + return restrictedExecutionPermissions; + } + + void ensureLogin() { + if (ugi.isFromKeytab()) { + try { + ugi.checkTGTAndReloginFromKeytab(); + } catch (IOException ioe) { + throw new UncheckedIOException("Could not re-authenticate", ioe); + } + } + } +} diff --git a/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy index b800f3eee46..f6476f290bc 100644 --- a/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy @@ -25,17 +25,60 @@ grant { permission java.lang.RuntimePermission "accessDeclaredMembers"; permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + // Needed so that Hadoop can load the correct classes for SPI and JAAS + // org.apache.hadoop.security.SecurityUtil clinit + // org.apache.hadoop.security.UserGroupInformation.newLoginContext() + permission java.lang.RuntimePermission "setContextClassLoader"; + // org.apache.hadoop.util.StringUtils clinit permission java.util.PropertyPermission "*", "read,write"; // org.apache.hadoop.util.ShutdownHookManager clinit permission java.lang.RuntimePermission "shutdownHooks"; - // JAAS is used always, we use a fake subject, hurts nobody + // JAAS is used by Hadoop for authentication purposes + // The Hadoop Login JAAS module modifies a Subject's private credentials and principals + // The Hadoop RPC Layer must be able to read these credentials, and initiate Kerberos connections + + // org.apache.hadoop.security.UserGroupInformation.getCurrentUser() permission javax.security.auth.AuthPermission "getSubject"; + + // org.apache.hadoop.security.UserGroupInformation.doAs() permission javax.security.auth.AuthPermission "doAs"; + + // org.apache.hadoop.security.UserGroupInformation.getCredentialsInternal() + permission javax.security.auth.PrivateCredentialPermission 
"org.apache.hadoop.security.Credentials * \"*\"", "read"; + + // Hadoop depends on the Kerberos login module for kerberos authentication + // com.sun.security.auth.module.Krb5LoginModule.login() + permission java.lang.RuntimePermission "accessClassInPackage.sun.security.krb5"; + + // com.sun.security.auth.module.Krb5LoginModule.commit() permission javax.security.auth.AuthPermission "modifyPrivateCredentials"; + permission javax.security.auth.AuthPermission "modifyPrincipals"; + permission javax.security.auth.PrivateCredentialPermission "javax.security.auth.kerberos.KeyTab * \"*\"", "read"; + permission javax.security.auth.PrivateCredentialPermission "javax.security.auth.kerberos.KerberosTicket * \"*\"", "read"; + + // Hadoop depends on OS level user information for simple authentication + // Unix: UnixLoginModule: com.sun.security.auth.module.UnixSystem.UnixSystem init + permission java.lang.RuntimePermission "loadLibrary.jaas_unix"; + // Windows: NTLoginModule: com.sun.security.auth.module.NTSystem.loadNative + permission java.lang.RuntimePermission "loadLibrary.jaas_nt"; + permission javax.security.auth.AuthPermission "modifyPublicCredentials"; + + // org.apache.hadoop.security.SaslRpcServer.init() + permission java.security.SecurityPermission "putProviderProperty.SaslPlainServer"; + + // org.apache.hadoop.security.SaslPlainServer.SecurityProvider.SecurityProvider init + permission java.security.SecurityPermission "insertProvider.SaslPlainServer"; + + // org.apache.hadoop.security.SaslRpcClient.getServerPrincipal -> KerberosPrincipal init + permission javax.security.auth.kerberos.ServicePermission "*", "initiate"; // hdfs client opens socket connections for to access repository permission java.net.SocketPermission "*", "connect"; + + // client binds to the address returned from the host name of any principal set up as a service principal + // org.apache.hadoop.ipc.Client.Connection.setupConnection + permission java.net.SocketPermission "localhost:0", "listen,resolve"; }; diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml index bca0703d457..e6b0e9d13c0 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml @@ -153,3 +153,15 @@ - match: { hits.total: 2 } - match: { hits.hits.0._source.filter_field: 1 } - match: { hits.hits.0._index: "my_remote_cluster:test_index" } + +--- +"Search an filtered alias and empty index on the remote cluster": + + - do: + search: + index: my_remote_cluster:aliased_test_index,my_remote_cluster:field_caps_index_1 + + - match: { _shards.total: 8 } + - match: { hits.total: 2 } + - match: { hits.hits.0._source.filter_field: 1 } + - match: { hits.hits.0._index: "my_remote_cluster:test_index" } diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yaml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yaml new file mode 100644 index 00000000000..b5be2f7e124 --- /dev/null +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/30_field_caps.yaml @@ -0,0 +1,66 @@ +--- +"Get simple field caps from remote cluster": + - skip: + version: " - 5.4.99" + reason: this uses a new API functionality that has been added in 5.5.0 + + - do: + indices.create: + index: field_caps_index_2 + 
body: + mappings: + t: + properties: + text: + type: text + keyword: + type: keyword + number: + type: double + geo: + type: geo_point + object: + type: object + properties: + nested1 : + type : text + index: true + nested2: + type: float + doc_values: true + + - do: + field_caps: + index: 'field_caps_index_2,my_remote_cluster:field_*' + fields: [text, keyword, number, geo] + + - match: {fields.text.text.searchable: true} + - match: {fields.text.text.aggregatable: false} + - is_false: fields.text.text.indices + - is_false: fields.text.text.non_searchable_indices + - is_false: fields.text.text.non_aggregatable_indices + - match: {fields.keyword.keyword.searchable: true} + - match: {fields.keyword.keyword.aggregatable: true} + - is_false: fields.keyword.keyword.indices + - is_false: fields.keyword.keyword.non_searchable_indices + - is_false: fields.keyword.keyword.non_aggregatable_indices + - match: {fields.number.double.searchable: true} + - match: {fields.number.double.aggregatable: true} + - match: {fields.number.double.indices: ["field_caps_index_2", "my_remote_cluster:field_caps_index_1"]} + - is_false: fields.number.double.non_searchable_indices + - is_false: fields.number.double.non_aggregatable_indices + - match: {fields.number.long.searchable: true} + - match: {fields.number.long.aggregatable: true} + - match: {fields.number.long.indices: ["my_remote_cluster:field_caps_index_3"]} + - is_false: fields.number.long.non_searchable_indices + - is_false: fields.number.long.non_aggregatable_indices + - match: {fields.geo.geo_point.searchable: true} + - match: {fields.geo.geo_point.aggregatable: true} + - match: {fields.geo.geo_point.indices: ["field_caps_index_2", "my_remote_cluster:field_caps_index_1"]} + - is_false: fields.geo.geo_point.non_searchable_indices + - is_false: fields.geo.geo_point.non_aggregatable_indices + - match: {fields.geo.keyword.searchable: true} + - match: {fields.geo.keyword.aggregatable: true} + - match: {fields.geo.keyword.indices: ["my_remote_cluster:field_caps_index_3"]} + - is_false: fields.geo.keyword.non_searchable_indices + - is_false: fields.geo.keyword.non_aggregatable_indices diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yaml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yaml index 5c68a591142..c3162383843 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yaml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yaml @@ -1,6 +1,54 @@ --- "Index data and search on the old cluster": + - do: + indices.create: + index: field_caps_index_1 + body: + mappings: + t: + properties: + text: + type: text + keyword: + type: keyword + number: + type: double + geo: + type: geo_point + object: + type: object + properties: + nested1 : + type : text + index: false + nested2: + type: float + doc_values: false + - do: + indices.create: + index: field_caps_index_3 + body: + mappings: + t: + properties: + text: + type: text + keyword: + type: keyword + number: + type: long + geo: + type: keyword + object: + type: object + properties: + nested1 : + type : long + index: false + nested2: + type: keyword + doc_values: false - do: indices.create: index: test_index diff --git a/qa/vagrant/versions b/qa/vagrant/versions index 996b8f7e8a2..e65c667a240 100644 --- a/qa/vagrant/versions +++ b/qa/vagrant/versions @@ -9,3 +9,4 @@ 5.3.0 5.3.1 5.3.2 +5.4.0 diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yaml index cef72b6e3fe..f22afb91169 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yaml @@ -165,3 +165,45 @@ setup: - match: {fields.number.long.indices: ["test3"]} - is_false: fields.number.long.non_searchable_indices - is_false: fields.number.long.non_aggregatable_indices + +--- +"Mix in non-existing field field caps": + - skip: + version: " - 5.4.0" + reason: "#24504 fixed a bug in this API in 5.4.1" + + - do: + field_caps: + index: 'test1,test2,test3' + fields: [text, keyword, no_such_field, number, geo] + + - match: {fields.text.text.searchable: true} + - match: {fields.text.text.aggregatable: false} + - is_false: fields.text.text.indices + - is_false: fields.text.text.non_searchable_indices + - is_false: fields.text.text.non_aggregatable_indices + - match: {fields.keyword.keyword.searchable: true} + - match: {fields.keyword.keyword.aggregatable: true} + - is_false: fields.keyword.keyword.indices + - is_false: fields.keyword.keyword.non_searchable_indices + - is_false: fields.keyword.keyword.non_aggregatable_indices + - match: {fields.number.double.searchable: true} + - match: {fields.number.double.aggregatable: true} + - match: {fields.number.double.indices: ["test1", "test2"]} + - is_false: fields.number.double.non_searchable_indices + - is_false: fields.number.double.non_aggregatable_indices + - match: {fields.number.long.searchable: true} + - match: {fields.number.long.aggregatable: true} + - match: {fields.number.long.indices: ["test3"]} + - is_false: fields.number.long.non_searchable_indices + - is_false: fields.number.long.non_aggregatable_indices + - match: {fields.geo.geo_point.searchable: true} + - match: {fields.geo.geo_point.aggregatable: true} + - match: {fields.geo.geo_point.indices: ["test1", "test2"]} + - is_false: fields.geo.geo_point.non_searchable_indices + - is_false: fields.geo.geo_point.non_aggregatable_indices + - match: {fields.geo.keyword.searchable: true} + - match: {fields.geo.keyword.aggregatable: true} + - match: {fields.geo.keyword.indices: ["test3"]} + - is_false: fields.geo.keyword.non_searchable_indices + - is_false: fields.geo.keyword.non_aggregatable_indices diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yaml index 42189883b1b..c2d341e3439 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yaml @@ -14,8 +14,8 @@ --- "Search shards aliases with and without filters": - skip: - version: " - 5.0.99" - reason: indices section was added in 5.1.0 + version: " - 5.4.0" + reason: "#24489 fixed a bug in 5.4.1 where not all aliases were returned - indices section was added in 5.1.0" - do: indices.create: @@ -49,7 +49,7 @@ - match: { shards.0.0.index: test_index } - is_true: indices.test_index - is_false: indices.test_index.filter - - is_false: indices.test_index.aliases + - match: { indices.test_index.aliases: [test_alias_no_filter]} - do: search_shards: @@ -78,3 +78,30 @@ - match: { indices.test_index.filter.bool.adjust_pure_negative: true} - lte: { indices.test_index.filter.bool.boost: 1.0 } - gte: { indices.test_index.filter.bool.boost: 1.0 } + + - do: + 
search_shards: + index: "test*" + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_filter_1, test_alias_filter_2, test_alias_no_filter]} + - is_false: indices.test_index.filter + + - do: + search_shards: + index: ["test_alias_filter_1","test_alias_no_filter"] + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_filter_1, test_alias_no_filter]} + - is_false: indices.test_index.filter + + - do: + search_shards: + index: ["test_alias_no_filter"] + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_no_filter]} + - is_false: indices.test_index.filter diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index 02f8896be4d..e1205ba846b 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -205,7 +205,7 @@ public abstract class ESAllocationTestCase extends ESTestCase { protected static class DelayedShardsMockGatewayAllocator extends GatewayAllocator { public DelayedShardsMockGatewayAllocator() { - super(Settings.EMPTY, null, null); + super(Settings.EMPTY); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 95013ee9649..80a0e9486f1 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -239,7 +239,7 @@ public abstract class IndexShardTestCase extends ESTestCase { @Nullable EngineFactory engineFactory, IndexingOperationListener... 
listeners) throws IOException { - // add node id as name to settings for popper logging + // add node id as name to settings for proper logging final ShardId shardId = routing.shardId(); final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java index cb4b8e098ae..af1bbc94d27 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java +++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/TestZenDiscovery.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.Map; import java.util.function.Supplier; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -59,10 +60,11 @@ public class TestZenDiscovery extends ZenDiscovery { public Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier, - ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider) { + ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, + AllocationService allocationService) { return Collections.singletonMap("test-zen", () -> new TestZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, - clusterApplier, clusterSettings, hostsProvider)); + clusterApplier, clusterSettings, hostsProvider, allocationService)); } @Override @@ -78,9 +80,10 @@ public class TestZenDiscovery extends ZenDiscovery { private TestZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, - ClusterApplier clusterApplier, ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider) { + ClusterApplier clusterApplier, ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, + AllocationService allocationService) { super(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, clusterSettings, - hostsProvider); + hostsProvider, allocationService); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java index b2b41b31461..d3e05d36f6e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java +++ b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java @@ -35,7 +35,7 @@ public class NoopGatewayAllocator extends GatewayAllocator { public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator(); protected NoopGatewayAllocator() { - super(Settings.EMPTY, null, null); + super(Settings.EMPTY); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java index f8c8c4694e5..2bbf2ce4c2c 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java @@ -96,7 +96,7 @@ public class TestGatewayAllocator extends GatewayAllocator { }; public TestGatewayAllocator() { - super(Settings.EMPTY, null, null); + super(Settings.EMPTY); } @Override
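Editor's note: to make the intent of the new HdfsSecurityContext easier to review, here is a minimal usage sketch. It is not code from this patch: the helper class name (HdfsSecurityContextUsageSketch), its login() and execute() methods, and the choice of UserGroupInformation.loginUserFromKeytabAndReturnUGI are assumptions made purely for illustration; the actual plugin wiring is not shown in this excerpt. The sketch relies only on members the patch itself introduces (locateKeytabFile, ensureLogin, getRestrictedExecutionPermissions) plus standard java.security APIs.

// Editorial sketch (not part of this patch); all names below marked as hypothetical are assumptions.
package org.elasticsearch.repositories.hdfs;

import java.io.IOException;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.Permission;
import java.security.Permissions;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.security.ProtectionDomain;

import org.apache.hadoop.security.UserGroupInformation;
import org.elasticsearch.env.Environment;

final class HdfsSecurityContextUsageSketch { // hypothetical helper, for illustration only

    // Hypothetical: log in from the keytab located by HdfsSecurityContext.locateKeytabFile()
    // and wrap the resulting UGI in a security context.
    static HdfsSecurityContext login(Environment environment, String principal) throws IOException {
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            principal, HdfsSecurityContext.locateKeytabFile(environment).toString());
        return new HdfsSecurityContext(ugi);
    }

    // Hypothetical: run an HDFS operation with only the permissions rendered for this
    // repository's authentication mode, re-authenticating first if the ticket may have expired.
    static <T> T execute(HdfsSecurityContext context, PrivilegedExceptionAction<T> operation) throws IOException {
        context.ensureLogin();
        Permissions permissions = new Permissions();
        for (Permission permission : context.getRestrictedExecutionPermissions()) {
            permissions.add(permission);
        }
        AccessControlContext restricted =
            new AccessControlContext(new ProtectionDomain[] { new ProtectionDomain(null, permissions) });
        try {
            return AccessController.doPrivileged(operation, restricted);
        } catch (PrivilegedActionException e) {
            throw new IOException("hdfs operation failed", e.getCause());
        }
    }
}

The point being illustrated: rather than granting the whole plugin a broad policy, each HDFS call runs inside an AccessControlContext limited to the small permission set rendered for the repository's authentication mode (SIMPLE or KERBEROS), which is why the plugin-security.policy above can stay narrowly scoped.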