Merge remote-tracking branch 'origin/master' into feature/client_aggs_parsing
This commit is contained in: commit c7c524dd3f

.github/ISSUE_TEMPLATE.md (vendored) — 48 changed lines
@@ -1,40 +1,46 @@
-<!--
-GitHub is reserved for bug reports and feature requests. The best place
-to ask a general question is at the Elastic Discourse forums at
-https://discuss.elastic.co. If you are in fact posting a bug report or
-a feature request, please include one and only one of the below blocks
-in your new issue. Note that whether you're filing a bug report or a
-feature request, ensure that your submission is for an
-[OS that we support](https://www.elastic.co/support/matrix#show_os).
-Bug reports on an OS that we do not support or feature requests
-specific to an OS that we do not support will be closed.
--->
-
-<!--
-If you are filing a bug report, please remove the below feature
-request block and provide responses for all of the below items.
--->
+<!--
+
+** Please read the guidelines below. **
+
+Issues that do not follow these guidelines are likely to be closed.
+
+1. GitHub is reserved for bug reports and feature requests. The best place to
+   ask a general question is at the Elastic [forums](https://discuss.elastic.co).
+   GitHub is not the place for general questions.
+
+2. Is this bug report or feature request for a supported OS? If not, it
+   is likely to be closed. See https://www.elastic.co/support/matrix#show_os
+
+3. Please fill out EITHER the feature request block or the bug report block
+   below, and delete the other block.
+
+-->
+
+<!-- Feature request -->
+
+**Describe the feature**:
+
+<!-- Bug report -->
 
 **Elasticsearch version**:
 
 **Plugins installed**: []
 
-**JVM version**:
+**JVM version** (`java -version`):
 
-**OS version**:
+**OS version** (`uname -a` if on a Unix-like system):
 
 **Description of the problem including expected versus actual behavior**:
 
 **Steps to reproduce**:
+
+Please include a *minimal* but *complete* recreation of the problem, including
+(e.g.) index creation, mappings, settings, query etc. The easier you make for
+us to reproduce it, the more likely that somebody will take the time to look at it.
+
  1.
  2.
  3.
 
 **Provide logs (if relevant)**:
-
-<!--
-If you are filing a feature request, please remove the above bug
-report block and provide responses for all of the below items.
--->
-
-**Describe the feature**:
@@ -350,7 +350,7 @@ These are the linux flavors the Vagrantfile currently supports:
 * debian-8 aka jessie, the current debian stable distribution
 * centos-6
 * centos-7
-* fedora-24
+* fedora-25
 * oel-6 aka Oracle Enterprise Linux 6
 * oel-7 aka Oracle Enterprise Linux 7
 * sles-12
@@ -426,23 +426,23 @@ sudo -E bats $BATS_TESTS/*.bats
 You can also use Gradle to prepare the test environment and then starts a single VM:
 
 -------------------------------------------------
-gradle vagrantFedora24#up
+gradle vagrantFedora25#up
 -------------------------------------------------
 
-Or any of vagrantCentos6#up, vagrantDebian8#up, vagrantFedora24#up, vagrantOel6#up,
-vagrantOel7#up, vagrantOpensuse13#up, vagrantSles12#up, vagrantUbuntu1404#up,
-vagrantUbuntu1604#up.
+Or any of vagrantCentos6#up, vagrantCentos7#up, vagrantDebian8#up,
+vagrantFedora25#up, vagrantOel6#up, vagrantOel7#up, vagrantOpensuse13#up,
+vagrantSles12#up, vagrantUbuntu1404#up, vagrantUbuntu1604#up.
 
 Once up, you can then connect to the VM using SSH from the elasticsearch directory:
 
 -------------------------------------------------
-vagrant ssh fedora-24
+vagrant ssh fedora-25
 -------------------------------------------------
 
 Or from another directory:
 
 -------------------------------------------------
-VAGRANT_CWD=/path/to/elasticsearch vagrant ssh fedora-24
+VAGRANT_CWD=/path/to/elasticsearch vagrant ssh fedora-25
 -------------------------------------------------
 
 Note: Starting vagrant VM outside of the elasticsearch folder requires to
Vagrantfile (vendored) — 4 changed lines

@@ -56,8 +56,8 @@ Vagrant.configure(2) do |config|
     config.vm.box = "elastic/oraclelinux-7-x86_64"
     rpm_common config
   end
-  config.vm.define "fedora-24" do |config|
-    config.vm.box = "elastic/fedora-24-x86_64"
+  config.vm.define "fedora-25" do |config|
+    config.vm.box = "elastic/fedora-25-x86_64"
     dnf_common config
   end
   config.vm.define "opensuse-13" do |config|
Allocators.java

@@ -36,8 +36,6 @@ import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.gateway.GatewayAllocator;
 
 import java.lang.reflect.InvocationTargetException;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -49,7 +47,7 @@ public final class Allocators {
         public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator();
 
         protected NoopGatewayAllocator() {
-            super(Settings.EMPTY, null, null);
+            super(Settings.EMPTY);
         }
 
         @Override
BuildPlugin.groovy

@@ -459,27 +459,11 @@ class BuildPlugin implements Plugin<Project> {
                 // TODO: why are we not passing maxmemory to junit4?
                 jvmArg '-Xmx' + System.getProperty('tests.heap.size', '512m')
                 jvmArg '-Xms' + System.getProperty('tests.heap.size', '512m')
-                if (JavaVersion.current().isJava7()) {
-                    // some tests need a large permgen, but that only exists on java 7
-                    jvmArg '-XX:MaxPermSize=128m'
-                }
-                jvmArg '-XX:MaxDirectMemorySize=512m'
                 jvmArg '-XX:+HeapDumpOnOutOfMemoryError'
                 File heapdumpDir = new File(project.buildDir, 'heapdump')
                 heapdumpDir.mkdirs()
                 jvmArg '-XX:HeapDumpPath=' + heapdumpDir
-                /*
-                 * We only want to append -XX:-OmitStackTraceInFastThrow if a flag for OmitStackTraceInFastThrow is not already included in
-                 * tests.jvm.argline.
-                 */
-                final String testsJvmArgline = System.getProperty('tests.jvm.argline')
-                if (testsJvmArgline == null) {
-                    argLine '-XX:-OmitStackTraceInFastThrow'
-                } else if (testsJvmArgline.indexOf("OmitStackTraceInFastThrow") < 0) {
-                    argLine testsJvmArgline.trim() + ' ' + '-XX:-OmitStackTraceInFastThrow'
-                } else {
-                    argLine testsJvmArgline.trim()
-                }
+                argLine System.getProperty('tests.jvm.argline')
 
                 // we use './temp' since this is per JVM and tests are forbidden from writing to CWD
                 systemProperty 'java.io.tmpdir', './temp'
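For readers outside this build: `tests.heap.size` and `tests.jvm.argline` are ordinary JVM system properties read at configuration time. Below is a minimal sketch of the same idea applied to a stock Gradle `Test` task; it is illustrative only and not part of this commit — the property names mirror the hunk above, everything else (defaults, task selection) is an assumption.

-------------------------------------------------
// build.gradle (illustrative sketch, not part of this commit)
tasks.withType(Test) {
    // mirror the -Xms/-Xmx handling from the hunk above
    String heap = System.getProperty('tests.heap.size', '512m')
    minHeapSize = heap
    maxHeapSize = heap

    // pass any extra flags from -Dtests.jvm.argline straight to the forked test JVM
    String argline = System.getProperty('tests.jvm.argline')
    if (argline != null) {
        jvmArgs(argline.trim().split(/\s+/))
    }
}
-------------------------------------------------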
AntFixture.groovy (new file)

@@ -0,0 +1,291 @@
/* ... standard Apache License 2.0 header ... */

package org.elasticsearch.gradle.test

import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.AntTask
import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.GradleException
import org.gradle.api.Task
import org.gradle.api.tasks.Exec
import org.gradle.api.tasks.Input

/**
 * A fixture for integration tests which runs in a separate process launched by Ant.
 */
public class AntFixture extends AntTask implements Fixture {

    /** The path to the executable that starts the fixture. */
    @Input
    String executable

    private final List<Object> arguments = new ArrayList<>()

    @Input
    public void args(Object... args) {
        arguments.addAll(args)
    }

    /**
     * Environment variables for the fixture process. The value can be any object, which
     * will have toString() called at execution time.
     */
    private final Map<String, Object> environment = new HashMap<>()

    @Input
    public void env(String key, Object value) {
        environment.put(key, value)
    }

    /** A flag to indicate whether the command should be executed from a shell. */
    @Input
    boolean useShell = false

    /**
     * A flag to indicate whether the fixture should be run in the foreground, or spawned.
     * It is protected so subclasses can override (eg RunTask).
     */
    protected boolean spawn = true

    /**
     * A closure to call before the fixture is considered ready. The closure is passed the fixture object,
     * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
     * condition is for http on the http port.
     */
    @Input
    Closure waitCondition = { AntFixture fixture, AntBuilder ant ->
        File tmpFile = new File(fixture.cwd, 'wait.success')
        ant.get(src: "http://${fixture.addressAndPort}",
                dest: tmpFile.toString(),
                ignoreerrors: true, // do not fail on error, so logging information can be flushed
                retries: 10)
        return tmpFile.exists()
    }

    private final Task stopTask

    public AntFixture() {
        stopTask = createStopTask()
        finalizedBy(stopTask)
    }

    @Override
    public Task getStopTask() {
        return stopTask
    }

    @Override
    protected void runAnt(AntBuilder ant) {
        project.delete(baseDir) // reset everything
        cwd.mkdirs()
        final String realExecutable
        final List<Object> realArgs = new ArrayList<>()
        final Map<String, Object> realEnv = environment
        // We need to choose which executable we are using. In shell mode, or when we
        // are spawning and thus using the wrapper script, the executable is the shell.
        if (useShell || spawn) {
            if (Os.isFamily(Os.FAMILY_WINDOWS)) {
                realExecutable = 'cmd'
                realArgs.add('/C')
                realArgs.add('"') // quote the entire command
            } else {
                realExecutable = 'sh'
            }
        } else {
            realExecutable = executable
            realArgs.addAll(arguments)
        }
        if (spawn) {
            writeWrapperScript(executable)
            realArgs.add(wrapperScript)
            realArgs.addAll(arguments)
        }
        if (Os.isFamily(Os.FAMILY_WINDOWS) && (useShell || spawn)) {
            realArgs.add('"')
        }
        commandString.eachLine { line -> logger.info(line) }

        ant.exec(executable: realExecutable, spawn: spawn, dir: cwd, taskname: name) {
            realEnv.each { key, value -> env(key: key, value: value) }
            realArgs.each { arg(value: it) }
        }

        String failedProp = "failed${name}"
        // first wait for resources, or the failure marker from the wrapper script
        ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
            or {
                resourceexists {
                    file(file: failureMarker.toString())
                }
                and {
                    resourceexists {
                        file(file: pidFile.toString())
                    }
                    resourceexists {
                        file(file: portsFile.toString())
                    }
                }
            }
        }

        if (ant.project.getProperty(failedProp) || failureMarker.exists()) {
            fail("Failed to start ${name}")
        }

        // the process is started (has a pid) and is bound to a network interface
        // so now wait undil the waitCondition has been met
        // TODO: change this to a loop?
        boolean success
        try {
            success = waitCondition(this, ant) == false
        } catch (Exception e) {
            String msg = "Wait condition caught exception for ${name}"
            logger.error(msg, e)
            fail(msg, e)
        }
        if (success == false) {
            fail("Wait condition failed for ${name}")
        }
    }

    /** Returns a debug string used to log information about how the fixture was run. */
    protected String getCommandString() {
        String commandString = "\n${name} configuration:\n"
        commandString += "-----------------------------------------\n"
        commandString += "  cwd: ${cwd}\n"
        commandString += "  command: ${executable} ${arguments.join(' ')}\n"
        commandString += '  environment:\n'
        environment.each { k, v -> commandString += "    ${k}: ${v}\n" }
        if (spawn) {
            commandString += "\n  [${wrapperScript.name}]\n"
            wrapperScript.eachLine('UTF-8', { line -> commandString += "    ${line}\n"})
        }
        return commandString
    }

    /**
     * Writes a script to run the real executable, so that stdout/stderr can be captured.
     * TODO: this could be removed if we do use our own ProcessBuilder and pump output from the process
     */
    private void writeWrapperScript(String executable) {
        wrapperScript.parentFile.mkdirs()
        String argsPasser = '"$@"'
        String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            argsPasser = '%*'
            exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
        }
        wrapperScript.setText("\"${executable}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
    }

    /** Fail the build with the given message, and logging relevant info*/
    private void fail(String msg, Exception... suppressed) {
        if (logger.isInfoEnabled() == false) {
            // We already log the command at info level. No need to do it twice.
            commandString.eachLine { line -> logger.error(line) }
        }
        logger.error("${name} output:")
        logger.error("-----------------------------------------")
        logger.error("  failure marker exists: ${failureMarker.exists()}")
        logger.error("  pid file exists: ${pidFile.exists()}")
        logger.error("  ports file exists: ${portsFile.exists()}")
        // also dump the log file for the startup script (which will include ES logging output to stdout)
        if (runLog.exists()) {
            logger.error("\n  [log]")
            runLog.eachLine { line -> logger.error("    ${line}") }
        }
        logger.error("-----------------------------------------")
        GradleException toThrow = new GradleException(msg)
        for (Exception e : suppressed) {
            toThrow.addSuppressed(e)
        }
        throw toThrow
    }

    /** Adds a task to kill an elasticsearch node with the given pidfile */
    private Task createStopTask() {
        final AntFixture fixture = this
        final Object pid = "${ -> fixture.pid }"
        Exec stop = project.tasks.create(name: "${name}#stop", type: LoggedExec)
        stop.onlyIf { fixture.pidFile.exists() }
        stop.doFirst {
            logger.info("Shutting down ${fixture.name} with pid ${pid}")
        }
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            stop.executable = 'Taskkill'
            stop.args('/PID', pid, '/F')
        } else {
            stop.executable = 'kill'
            stop.args('-9', pid)
        }
        stop.doLast {
            project.delete(fixture.pidFile)
        }
        return stop
    }

    /**
     * A path relative to the build dir that all configuration and runtime files
     * will live in for this fixture
     */
    protected File getBaseDir() {
        return new File(project.buildDir, "fixtures/${name}")
    }

    /** Returns the working directory for the process. Defaults to "cwd" inside baseDir. */
    protected File getCwd() {
        return new File(baseDir, 'cwd')
    }

    /** Returns the file the process writes its pid to. Defaults to "pid" inside baseDir. */
    protected File getPidFile() {
        return new File(baseDir, 'pid')
    }

    /** Reads the pid file and returns the process' pid */
    public int getPid() {
        return Integer.parseInt(pidFile.getText('UTF-8').trim())
    }

    /** Returns the file the process writes its bound ports to. Defaults to "ports" inside baseDir. */
    protected File getPortsFile() {
        return new File(baseDir, 'ports')
    }

    /** Returns an address and port suitable for a uri to connect to this node over http */
    public String getAddressAndPort() {
        return portsFile.readLines("UTF-8").get(0)
    }

    /** Returns a file that wraps around the actual command when {@code spawn == true}. */
    protected File getWrapperScript() {
        return new File(cwd, Os.isFamily(Os.FAMILY_WINDOWS) ? 'run.bat' : 'run')
    }

    /** Returns a file that the wrapper script writes when the command failed. */
    protected File getFailureMarker() {
        return new File(cwd, 'run.failed')
    }

    /** Returns a file that the wrapper script writes when the command failed. */
    protected File getRunLog() {
        return new File(cwd, 'run.log')
    }
}
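A hypothetical example of how a consuming build script might wire an AntFixture into an integration test. The service jar, port, and task names below are made up, and the `integTest` task is assumed to exist in the consuming project; this sketch is not part of the commit.

-------------------------------------------------
// illustrative sketch only — not part of this commit
import org.elasticsearch.gradle.test.AntFixture

task exampleFixture(type: AntFixture) {
    executable = 'java'                                   // any long-running process works
    args '-jar', 'build/libs/example-test-service.jar'    // hypothetical service jar
    env 'EXAMPLE_PORT', '8080'
    // waitCondition defaults to an HTTP GET against the address in the fixture's ports file
}

integTest {
    dependsOn exampleFixture
    // no explicit cleanup is needed: AntFixture finalizes itself with its generated "#stop" task
}
-------------------------------------------------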
ClusterFormationTasks.groovy

@@ -208,7 +208,7 @@ class ClusterFormationTasks {
         start.finalizedBy(stop)
         for (Object dependency : config.dependencies) {
             if (dependency instanceof Fixture) {
-                Task depStop = ((Fixture)dependency).stopTask
+                def depStop = ((Fixture)dependency).stopTask
                 runner.finalizedBy(depStop)
                 start.finalizedBy(depStop)
             }
Fixture.groovy

@@ -16,272 +16,15 @@
(The removed lines are the process-launching implementation shown above, which this commit moves verbatim into AntFixture.groovy — the fields, constructor, runAnt(), getCommandString(), writeWrapperScript(), fail(), createStopTask(), and the baseDir/cwd/pid/ports/wrapperScript/failureMarker/runLog helpers, with `Fixture` in place of `AntFixture` in the waitCondition signature and in createStopTask(). Only the lines that differ are listed here.)

  * specific language governing permissions and limitations
  * under the License.
  */
 
 package org.elasticsearch.gradle.test
 
-import org.apache.tools.ant.taskdefs.condition.Os
-import org.elasticsearch.gradle.AntTask
-import org.elasticsearch.gradle.LoggedExec
-import org.gradle.api.GradleException
-import org.gradle.api.Task
-import org.gradle.api.tasks.Exec
-import org.gradle.api.tasks.Input
-
 /**
- * A fixture for integration tests which runs in a separate process.
+ * Any object that can produce an accompanying stop task, meant to tear down
+ * a previously instantiated service.
  */
-public class Fixture extends AntTask {
-
-    ... (implementation removed; now lives in AntFixture.groovy)
+public interface Fixture {
 
     /** A task which will stop this fixture. This should be used as a finalizedBy for any tasks that use the fixture. */
-    public final Task stopTask
+    public Object getStopTask()
 }
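Since Fixture is now just a contract, any task-like object can participate by exposing a stop task. A minimal sketch of a custom implementation follows; the class and field names are illustrative and not part of the commit.

-------------------------------------------------
// illustrative sketch only — not part of this commit
import org.elasticsearch.gradle.test.Fixture
import org.gradle.api.DefaultTask
import org.gradle.api.Task

class ExampleFixture extends DefaultTask implements Fixture {

    // created elsewhere, e.g. project.tasks.create("${name}#stop", ...)
    Task stop

    @Override
    Object getStopTask() {
        return stop
    }
}
-------------------------------------------------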
RestIntegTestTask.groovy

@@ -129,7 +129,7 @@ public class RestIntegTestTask extends DefaultTask {
         runner.dependsOn(dependencies)
         for (Object dependency : dependencies) {
             if (dependency instanceof Fixture) {
-                runner.finalizedBy(((Fixture)dependency).stopTask)
+                runner.finalizedBy(((Fixture)dependency).getStopTask())
             }
         }
         return this
@@ -140,7 +140,7 @@ public class RestIntegTestTask extends DefaultTask {
         runner.setDependsOn(dependencies)
         for (Object dependency : dependencies) {
             if (dependency instanceof Fixture) {
-                runner.finalizedBy(((Fixture)dependency).stopTask)
+                runner.finalizedBy(((Fixture)dependency).getStopTask())
             }
         }
     }
VagrantFixture.groovy (new file)

@@ -0,0 +1,54 @@
/* ... standard Apache License 2.0 header ... */

package org.elasticsearch.gradle.test

import org.elasticsearch.gradle.vagrant.VagrantCommandTask
import org.gradle.api.Task

/**
 * A fixture for integration tests which runs in a virtual machine launched by Vagrant.
 */
class VagrantFixture extends VagrantCommandTask implements Fixture {

    private VagrantCommandTask stopTask

    public VagrantFixture() {
        this.stopTask = project.tasks.create(name: "${name}#stop", type: VagrantCommandTask) {
            command 'halt'
        }
        finalizedBy this.stopTask
    }

    @Override
    void setBoxName(String boxName) {
        super.setBoxName(boxName)
        this.stopTask.setBoxName(boxName)
    }

    @Override
    void setEnvironmentVars(Map<String, String> environmentVars) {
        super.setEnvironmentVars(environmentVars)
        this.stopTask.setEnvironmentVars(environmentVars)
    }

    @Override
    public Task getStopTask() {
        return this.stopTask
    }
}
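A hypothetical wiring of the new VagrantFixture; the box name and the consuming `integTest` task are made up, while `command`, `args`, `boxName`, and `environmentVars` are the inputs shown in this commit.

-------------------------------------------------
// illustrative sketch only — not part of this commit
import org.elasticsearch.gradle.test.VagrantFixture

task exampleVagrantFixture(type: VagrantFixture) {
    command 'up'                                  // runs: vagrant up <boxName> <args>
    args '--provision', '--provider', 'virtualbox'
    boxName 'example-box'                         // hypothetical box
    environmentVars ['VAGRANT_CWD': project.projectDir.toString()]
}

integTest {
    dependsOn exampleVagrantFixture
    // the fixture's generated "#stop" task runs `vagrant halt example-box` automatically
}
-------------------------------------------------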
BatsOverVagrantTask.groovy

@@ -27,12 +27,15 @@ import org.gradle.api.tasks.Input
 public class BatsOverVagrantTask extends VagrantCommandTask {
 
     @Input
-    String command
+    String remoteCommand
 
     BatsOverVagrantTask() {
-        project.afterEvaluate {
-            args 'ssh', boxName, '--command', command
-        }
+        command = 'ssh'
+    }
+
+    void setRemoteCommand(String remoteCommand) {
+        this.remoteCommand = Objects.requireNonNull(remoteCommand)
+        setArgs(['--command', remoteCommand])
     }
 
     @Override
VagrantCommandTask.groovy

@@ -21,9 +21,15 @@ package org.elasticsearch.gradle.vagrant
 import org.apache.commons.io.output.TeeOutputStream
 import org.elasticsearch.gradle.LoggedExec
 import org.gradle.api.tasks.Input
+import org.gradle.api.tasks.Optional
+import org.gradle.api.tasks.TaskAction
 import org.gradle.internal.logging.progress.ProgressLoggerFactory
 
 import javax.inject.Inject
+import java.util.concurrent.CountDownLatch
+import java.util.concurrent.locks.Lock
+import java.util.concurrent.locks.ReadWriteLock
+import java.util.concurrent.locks.ReentrantLock
 
 /**
  * Runs a vagrant command. Pretty much like Exec task but with a nicer output
@@ -31,6 +37,12 @@ import javax.inject.Inject
  */
 public class VagrantCommandTask extends LoggedExec {
 
+    @Input
+    String command
+
+    @Input @Optional
+    String subcommand
+
     @Input
     String boxName
 
@@ -40,11 +52,27 @@ public class VagrantCommandTask extends LoggedExec {
     public VagrantCommandTask() {
         executable = 'vagrant'
+
+        // We're using afterEvaluate here to slot in some logic that captures configurations and
+        // modifies the command line right before the main execution happens. The reason that we
+        // call doFirst instead of just doing the work in the afterEvaluate is that the latter
+        // restricts how subclasses can extend functionality. Calling afterEvaluate is like having
+        // all the logic of a task happening at construction time, instead of at execution time
+        // where a subclass can override or extend the logic.
         project.afterEvaluate {
-            // It'd be nice if --machine-readable were, well, nice
-            standardOutput = new TeeOutputStream(standardOutput, createLoggerOutputStream())
-            if (environmentVars != null) {
-                environment environmentVars
+            doFirst {
+                if (environmentVars != null) {
+                    environment environmentVars
+                }
+
+                // Build our command line for vagrant
+                def vagrantCommand = [executable, command]
+                if (subcommand != null) {
+                    vagrantCommand = vagrantCommand + subcommand
+                }
+                commandLine([*vagrantCommand, boxName, *args])
+
+                // It'd be nice if --machine-readable were, well, nice
+                standardOutput = new TeeOutputStream(standardOutput, createLoggerOutputStream())
             }
         }
     }
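With `command` and `subcommand` now declared inputs, a multi-word vagrant invocation can be expressed directly on the task. A short sketch (task and box names are illustrative, not part of the commit), mirroring the `vagrant box update` usage in VagrantTestPlugin below:

-------------------------------------------------
// illustrative sketch only — not part of this commit
import org.elasticsearch.gradle.vagrant.VagrantCommandTask

task updateExampleBox(type: VagrantCommandTask) {
    command 'box'            // runs: vagrant box update example-box
    subcommand 'update'
    boxName 'example-box'
    environmentVars ['VAGRANT_CWD': project.projectDir.toString()]
}
-------------------------------------------------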
VagrantTestPlugin.groovy

@@ -17,7 +17,7 @@ class VagrantTestPlugin implements Plugin<Project> {
         'centos-6',
         'centos-7',
         'debian-8',
-        'fedora-24',
+        'fedora-25',
         'oel-6',
         'oel-7',
         'opensuse-13',
@@ -391,21 +391,23 @@ class VagrantTestPlugin implements Plugin<Project> {
 
         // always add a halt task for all boxes, so clean makes sure they are all shutdown
         Task halt = project.tasks.create("vagrant${boxTask}#halt", VagrantCommandTask) {
+            command 'halt'
             boxName box
             environmentVars vagrantEnvVars
-            args 'halt', box
         }
         stop.dependsOn(halt)
 
         Task update = project.tasks.create("vagrant${boxTask}#update", VagrantCommandTask) {
+            command 'box'
+            subcommand 'update'
             boxName box
             environmentVars vagrantEnvVars
-            args 'box', 'update', box
             dependsOn vagrantCheckVersion, virtualboxCheckVersion
         }
         update.mustRunAfter(setupBats)
 
         Task up = project.tasks.create("vagrant${boxTask}#up", VagrantCommandTask) {
+            command 'up'
             boxName box
             environmentVars vagrantEnvVars
             /* Its important that we try to reprovision the box even if it already
@@ -418,7 +420,7 @@ class VagrantTestPlugin implements Plugin<Project> {
               vagrant's default but its possible to change that default and folks do.
               But the boxes that we use are unlikely to work properly with other
               virtualization providers. Thus the lock. */
-            args 'up', box, '--provision', '--provider', 'virtualbox'
+            args '--provision', '--provider', 'virtualbox'
             /* It'd be possible to check if the box is already up here and output
               SKIPPED but that would require running vagrant status which is slow! */
             dependsOn update
@@ -434,11 +436,11 @@ class VagrantTestPlugin implements Plugin<Project> {
         vagrantSmokeTest.dependsOn(smoke)
 
         Task packaging = project.tasks.create("vagrant${boxTask}#packagingTest", BatsOverVagrantTask) {
+            remoteCommand BATS_TEST_COMMAND
             boxName box
             environmentVars vagrantEnvVars
             dependsOn up, setupBats
             finalizedBy halt
-            command BATS_TEST_COMMAND
         }
 
         TaskExecutionAdapter packagingReproListener = new TaskExecutionAdapter() {
@@ -461,11 +463,12 @@ class VagrantTestPlugin implements Plugin<Project> {
         }
 
         Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) {
+            command 'ssh'
             boxName box
             environmentVars vagrantEnvVars
             dependsOn up
             finalizedBy halt
-            args 'ssh', boxName, '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.extensions.esvagrant.formattedTestSeed}"
+            args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.extensions.esvagrant.formattedTestSeed}"
         }
         TaskExecutionAdapter platformReproListener = new TaskExecutionAdapter() {
             @Override
Version.java

@@ -80,6 +80,8 @@ public class Version implements Comparable<Version> {
     public static final Version V_5_3_2_UNRELEASED = new Version(V_5_3_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_2);
     public static final int V_5_4_0_ID_UNRELEASED = 5040099;
     public static final Version V_5_4_0_UNRELEASED = new Version(V_5_4_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
+    public static final int V_5_4_1_ID_UNRELEASED = 5040199;
+    public static final Version V_5_4_1_UNRELEASED = new Version(V_5_4_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_1);
     public static final int V_5_5_0_ID_UNRELEASED = 5050099;
     public static final Version V_5_5_0_UNRELEASED = new Version(V_5_5_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
     public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
@@ -104,6 +106,8 @@ public class Version implements Comparable<Version> {
                 return V_6_0_0_alpha1_UNRELEASED;
             case V_5_5_0_ID_UNRELEASED:
                 return V_5_5_0_UNRELEASED;
+            case V_5_4_1_ID_UNRELEASED:
+                return V_5_4_1_UNRELEASED;
             case V_5_4_0_ID_UNRELEASED:
                 return V_5_4_0_UNRELEASED;
             case V_5_3_2_ID_UNRELEASED:
ClusterSearchShardsResponse.java

@@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.internal.AliasFilter;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -117,10 +118,14 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
                 String index = entry.getKey();
                 builder.startObject(index);
                 AliasFilter aliasFilter = entry.getValue();
-                if (aliasFilter.getAliases().length > 0) {
-                    builder.array("aliases", aliasFilter.getAliases());
-                    builder.field("filter");
-                    aliasFilter.getQueryBuilder().toXContent(builder, params);
+                String[] aliases = aliasFilter.getAliases();
+                if (aliases.length > 0) {
+                    Arrays.sort(aliases); // we want consistent ordering here and these values might be generated from a set / map
+                    builder.array("aliases", aliases);
+                    if (aliasFilter.getQueryBuilder() != null) { // might be null if we include non-filtering aliases
+                        builder.field("filter");
+                        aliasFilter.getQueryBuilder().toXContent(builder, params);
+                    }
                 }
                 builder.endObject();
             }
TransportClusterSearchShardsAction.java

@@ -83,8 +83,10 @@ public class TransportClusterSearchShardsAction extends
         Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices());
         Map<String, AliasFilter> indicesAndFilters = new HashMap<>();
         for (String index : concreteIndices) {
-            AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, index, request.indices());
-            indicesAndFilters.put(index, aliasFilter);
+            final AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, index, request.indices());
+            final String[] aliases = indexNameExpressionResolver.indexAliases(clusterState, index, aliasMetadata -> true, true,
+                request.indices());
+            indicesAndFilters.put(index, new AliasFilter(aliasFilter.getQueryBuilder(), aliases));
         }
 
         Set<String> nodeIds = new HashSet<>();
FieldCapabilitiesIndexResponse.java

@@ -22,6 +22,7 @@ package org.elasticsearch.action.fieldcaps;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
 
 import java.io.IOException;
 import java.util.Map;
@@ -29,7 +30,7 @@ import java.util.Map;
 /**
  * Response for {@link FieldCapabilitiesIndexRequest} requests.
  */
-public class FieldCapabilitiesIndexResponse extends ActionResponse {
+public class FieldCapabilitiesIndexResponse extends ActionResponse implements Writeable {
     private String indexName;
     private Map<String, FieldCapabilities> responseMap;
 
@@ -41,6 +42,10 @@ public class FieldCapabilitiesIndexResponse extends ActionResponse {
     FieldCapabilitiesIndexResponse() {
     }
 
+    FieldCapabilitiesIndexResponse(StreamInput input) throws IOException {
+        this.readFrom(input);
+    }
+
+
     /**
      * Get the index name
|
|||||||
|
|
||||||
package org.elasticsearch.action.fieldcaps;
|
package org.elasticsearch.action.fieldcaps;
|
||||||
|
|
||||||
|
import org.elasticsearch.Version;
|
||||||
import org.elasticsearch.action.ActionRequest;
|
import org.elasticsearch.action.ActionRequest;
|
||||||
import org.elasticsearch.action.ActionRequestValidationException;
|
import org.elasticsearch.action.ActionRequestValidationException;
|
||||||
import org.elasticsearch.action.IndicesRequest;
|
import org.elasticsearch.action.IndicesRequest;
|
||||||
@ -38,13 +39,14 @@ import java.util.Set;
|
|||||||
|
|
||||||
import static org.elasticsearch.common.xcontent.ObjectParser.fromList;
|
import static org.elasticsearch.common.xcontent.ObjectParser.fromList;
|
||||||
|
|
||||||
public class FieldCapabilitiesRequest extends ActionRequest
|
public final class FieldCapabilitiesRequest extends ActionRequest implements IndicesRequest.Replaceable {
|
||||||
implements IndicesRequest.Replaceable {
|
|
||||||
public static final ParseField FIELDS_FIELD = new ParseField("fields");
|
public static final ParseField FIELDS_FIELD = new ParseField("fields");
|
||||||
public static final String NAME = "field_caps_request";
|
public static final String NAME = "field_caps_request";
|
||||||
private String[] indices = Strings.EMPTY_ARRAY;
|
private String[] indices = Strings.EMPTY_ARRAY;
|
||||||
private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
|
private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
|
||||||
private String[] fields = Strings.EMPTY_ARRAY;
|
private String[] fields = Strings.EMPTY_ARRAY;
|
||||||
|
// pkg private API mainly for cross cluster search to signal that we do multiple reductions ie. the results should not be merged
|
||||||
|
private boolean mergeResults = true;
|
||||||
|
|
||||||
private static ObjectParser<FieldCapabilitiesRequest, Void> PARSER =
|
private static ObjectParser<FieldCapabilitiesRequest, Void> PARSER =
|
||||||
new ObjectParser<>(NAME, FieldCapabilitiesRequest::new);
|
new ObjectParser<>(NAME, FieldCapabilitiesRequest::new);
|
||||||
@ -56,16 +58,39 @@ public class FieldCapabilitiesRequest extends ActionRequest
|
|||||||
|
|
||||||
public FieldCapabilitiesRequest() {}
|
public FieldCapabilitiesRequest() {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns <code>true</code> iff the results should be merged.
|
||||||
|
*/
|
||||||
|
boolean isMergeResults() {
|
||||||
|
return mergeResults;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* if set to <code>true</code> the response will contain only a merged view of the per index field capabilities. Otherwise only
|
||||||
|
* unmerged per index field capabilities are returned.
|
||||||
|
*/
|
||||||
|
void setMergeResults(boolean mergeResults) {
|
||||||
|
this.mergeResults = mergeResults;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void readFrom(StreamInput in) throws IOException {
|
public void readFrom(StreamInput in) throws IOException {
|
||||||
super.readFrom(in);
|
super.readFrom(in);
|
||||||
fields = in.readStringArray();
|
fields = in.readStringArray();
|
||||||
|
if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||||
|
mergeResults = in.readBoolean();
|
||||||
|
} else {
|
||||||
|
mergeResults = true;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
super.writeTo(out);
|
super.writeTo(out);
|
||||||
out.writeStringArray(fields);
|
out.writeStringArray(fields);
|
||||||
|
if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||||
|
out.writeBoolean(mergeResults);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public static FieldCapabilitiesRequest parseFields(XContentParser parser) throws IOException {
|
public static FieldCapabilitiesRequest parseFields(XContentParser parser) throws IOException {
|
||||||
|
@ -19,6 +19,7 @@
|
|||||||
|
|
||||||
package org.elasticsearch.action.fieldcaps;
|
package org.elasticsearch.action.fieldcaps;
|
||||||
|
|
||||||
|
import org.elasticsearch.Version;
|
||||||
import org.elasticsearch.action.ActionResponse;
|
import org.elasticsearch.action.ActionResponse;
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||||
@ -27,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
|||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -34,9 +36,20 @@ import java.util.Map;
|
|||||||
*/
|
*/
|
||||||
public class FieldCapabilitiesResponse extends ActionResponse implements ToXContent {
|
public class FieldCapabilitiesResponse extends ActionResponse implements ToXContent {
|
||||||
private Map<String, Map<String, FieldCapabilities>> responseMap;
|
private Map<String, Map<String, FieldCapabilities>> responseMap;
|
||||||
|
private List<FieldCapabilitiesIndexResponse> indexResponses;
|
||||||
|
|
||||||
FieldCapabilitiesResponse(Map<String, Map<String, FieldCapabilities>> responseMap) {
|
FieldCapabilitiesResponse(Map<String, Map<String, FieldCapabilities>> responseMap) {
|
||||||
|
this(responseMap, Collections.emptyList());
|
||||||
|
}
|
||||||
|
|
||||||
|
FieldCapabilitiesResponse(List<FieldCapabilitiesIndexResponse> indexResponses) {
|
||||||
|
this(Collections.emptyMap(), indexResponses);
|
||||||
|
}
|
||||||
|
|
||||||
|
private FieldCapabilitiesResponse(Map<String, Map<String, FieldCapabilities>> responseMap,
|
||||||
|
List<FieldCapabilitiesIndexResponse> indexResponses) {
|
||||||
this.responseMap = responseMap;
|
this.responseMap = responseMap;
|
||||||
|
this.indexResponses = indexResponses;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -53,6 +66,13 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
|
|||||||
return responseMap;
|
return responseMap;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the actual per-index field caps responses
|
||||||
|
*/
|
||||||
|
List<FieldCapabilitiesIndexResponse> getIndexResponses() {
|
||||||
|
return indexResponses;
|
||||||
|
}
|
||||||
/**
|
/**
|
||||||
*
|
*
|
||||||
* Get the field capabilities per type for the provided {@code field}.
|
* Get the field capabilities per type for the provided {@code field}.
|
||||||
@ -66,6 +86,11 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
|
|||||||
super.readFrom(in);
|
super.readFrom(in);
|
||||||
this.responseMap =
|
this.responseMap =
|
||||||
in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField);
|
in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField);
|
||||||
|
if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||||
|
indexResponses = in.readList(FieldCapabilitiesIndexResponse::new);
|
||||||
|
} else {
|
||||||
|
indexResponses = Collections.emptyList();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private static Map<String, FieldCapabilities> readField(StreamInput in) throws IOException {
|
private static Map<String, FieldCapabilities> readField(StreamInput in) throws IOException {
|
||||||
@ -76,6 +101,10 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
|
|||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
super.writeTo(out);
|
super.writeTo(out);
|
||||||
out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField);
|
out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField);
|
||||||
|
if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||||
|
out.writeList(indexResponses);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private static void writeField(StreamOutput out,
|
private static void writeField(StreamOutput out,
|
||||||
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.fieldcaps;
 
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.cluster.ClusterState;
@@ -27,18 +28,27 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.CountDown;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.RemoteClusterAware;
+import org.elasticsearch.transport.RemoteClusterService;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;
 
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReferenceArray;
 
-public class TransportFieldCapabilitiesAction
-    extends HandledTransportAction<FieldCapabilitiesRequest, FieldCapabilitiesResponse> {
+public class TransportFieldCapabilitiesAction extends HandledTransportAction<FieldCapabilitiesRequest, FieldCapabilitiesResponse> {
     private final ClusterService clusterService;
     private final TransportFieldCapabilitiesIndexAction shardAction;
+    private final RemoteClusterService remoteClusterService;
+    private final TransportService transportService;
 
     @Inject
     public TransportFieldCapabilitiesAction(Settings settings, TransportService transportService,
@@ -50,71 +60,97 @@ public class TransportFieldCapabilitiesAction
         super(settings, FieldCapabilitiesAction.NAME, threadPool, transportService,
             actionFilters, indexNameExpressionResolver, FieldCapabilitiesRequest::new);
         this.clusterService = clusterService;
+        this.remoteClusterService = transportService.getRemoteClusterService();
+        this.transportService = transportService;
         this.shardAction = shardAction;
     }
 
     @Override
     protected void doExecute(FieldCapabilitiesRequest request,
                              final ActionListener<FieldCapabilitiesResponse> listener) {
-        ClusterState clusterState = clusterService.state();
-        String[] concreteIndices =
-            indexNameExpressionResolver.concreteIndexNames(clusterState, request);
-        final AtomicInteger indexCounter = new AtomicInteger();
-        final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length);
-        final AtomicReferenceArray<Object> indexResponses =
-            new AtomicReferenceArray<>(concreteIndices.length);
-        if (concreteIndices.length == 0) {
+        final ClusterState clusterState = clusterService.state();
+        final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(request.indicesOptions(),
+            request.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
+        final OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+        final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, localIndices);
+        final int totalNumRequest = concreteIndices.length + remoteClusterIndices.size();
+        final CountDown completionCounter = new CountDown(totalNumRequest);
+        final List<FieldCapabilitiesIndexResponse> indexResponses = Collections.synchronizedList(new ArrayList<>());
+        final Runnable onResponse = () -> {
+            if (completionCounter.countDown()) {
+                if (request.isMergeResults()) {
+                    listener.onResponse(merge(indexResponses));
+                } else {
+                    listener.onResponse(new FieldCapabilitiesResponse(indexResponses));
+                }
+            }
+        };
+        if (totalNumRequest == 0) {
             listener.onResponse(new FieldCapabilitiesResponse());
         } else {
+            ActionListener<FieldCapabilitiesIndexResponse> innerListener = new ActionListener<FieldCapabilitiesIndexResponse>() {
+                @Override
+                public void onResponse(FieldCapabilitiesIndexResponse result) {
+                    indexResponses.add(result);
+                    onResponse.run();
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // TODO we should somehow inform the user that we failed
+                    onResponse.run();
+                }
+            };
             for (String index : concreteIndices) {
-                FieldCapabilitiesIndexRequest indexRequest =
-                    new FieldCapabilitiesIndexRequest(request.fields(), index);
-                shardAction.execute(indexRequest,
-                    new ActionListener<FieldCapabilitiesIndexResponse> () {
+                shardAction.execute(new FieldCapabilitiesIndexRequest(request.fields(), index), innerListener);
+            }
+            // this is the cross cluster part of this API - we force the other cluster to not merge the results but instead
+            // send us back all individual index results.
+            for (Map.Entry<String, OriginalIndices> remoteIndices : remoteClusterIndices.entrySet()) {
+                String clusterAlias = remoteIndices.getKey();
+                OriginalIndices originalIndices = remoteIndices.getValue();
+                Transport.Connection connection = remoteClusterService.getConnection(remoteIndices.getKey());
+                FieldCapabilitiesRequest remoteRequest = new FieldCapabilitiesRequest();
+                remoteRequest.setMergeResults(false); // we need to merge on this node
+                remoteRequest.indicesOptions(originalIndices.indicesOptions());
+                remoteRequest.indices(originalIndices.indices());
+                remoteRequest.fields(request.fields());
+                transportService.sendRequest(connection, FieldCapabilitiesAction.NAME, remoteRequest, TransportRequestOptions.EMPTY,
+                    new TransportResponseHandler<FieldCapabilitiesResponse>() {
                         @Override
-                        public void onResponse(FieldCapabilitiesIndexResponse result) {
-                            indexResponses.set(indexCounter.getAndIncrement(), result);
-                            if (completionCounter.decrementAndGet() == 0) {
-                                listener.onResponse(merge(indexResponses));
-                            }
+                        public FieldCapabilitiesResponse newInstance() {
+                            return new FieldCapabilitiesResponse();
                         }
 
                         @Override
-                        public void onFailure(Exception e) {
-                            indexResponses.set(indexCounter.getAndIncrement(), e);
-                            if (completionCounter.decrementAndGet() == 0) {
-                                listener.onResponse(merge(indexResponses));
-                            }
+                        public void handleResponse(FieldCapabilitiesResponse response) {
+                            for (FieldCapabilitiesIndexResponse res : response.getIndexResponses()) {
+                                indexResponses.add(new FieldCapabilitiesIndexResponse(RemoteClusterAware.buildRemoteIndexName(clusterAlias,
+                                    res.getIndexName()), res.get()));
                             }
+                            onResponse.run();
+                        }
+
+                        @Override
+                        public void handleException(TransportException exp) {
+                            onResponse.run();
+                        }
+
+                        @Override
+                        public String executor() {
+                            return ThreadPool.Names.SAME;
                         }
                     });
             }
 
         }
     }
 
-    private FieldCapabilitiesResponse merge(AtomicReferenceArray<Object> indexResponses) {
+    private FieldCapabilitiesResponse merge(List<FieldCapabilitiesIndexResponse> indexResponses) {
         Map<String, Map<String, FieldCapabilities.Builder>> responseMapBuilder = new HashMap<> ();
-        for (int i = 0; i < indexResponses.length(); i++) {
-            Object element = indexResponses.get(i);
-            if (element instanceof FieldCapabilitiesIndexResponse == false) {
-                assert element instanceof Exception;
-                continue;
-            }
-            FieldCapabilitiesIndexResponse response = (FieldCapabilitiesIndexResponse) element;
-            for (String field : response.get().keySet()) {
-                Map<String, FieldCapabilities.Builder> typeMap = responseMapBuilder.get(field);
-                if (typeMap == null) {
-                    typeMap = new HashMap<> ();
-                    responseMapBuilder.put(field, typeMap);
-                }
-                FieldCapabilities fieldCap = response.getField(field);
-                FieldCapabilities.Builder builder = typeMap.get(fieldCap.getType());
-                if (builder == null) {
-                    builder = new FieldCapabilities.Builder(field, fieldCap.getType());
-                    typeMap.put(fieldCap.getType(), builder);
-                }
-                builder.add(response.getIndexName(),
-                    fieldCap.isSearchable(), fieldCap.isAggregatable());
-            }
+        for (FieldCapabilitiesIndexResponse response : indexResponses) {
+            innerMerge(responseMapBuilder, response.getIndexName(), response.get());
         }
 
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();
@@ -131,4 +167,16 @@ public class TransportFieldCapabilitiesAction
 
         return new FieldCapabilitiesResponse(responseMap);
     }
+
+    private void innerMerge(Map<String, Map<String, FieldCapabilities.Builder>> responseMapBuilder, String indexName,
+                            Map<String, FieldCapabilities> map) {
+        for (Map.Entry<String, FieldCapabilities> entry : map.entrySet()) {
+            final String field = entry.getKey();
+            final FieldCapabilities fieldCap = entry.getValue();
+            Map<String, FieldCapabilities.Builder> typeMap = responseMapBuilder.computeIfAbsent(field, f -> new HashMap<>());
+            FieldCapabilities.Builder builder = typeMap.computeIfAbsent(fieldCap.getType(), key -> new FieldCapabilities.Builder(field,
+                key));
+            builder.add(indexName, fieldCap.isSearchable(), fieldCap.isAggregatable());
+        }
+    }
 }
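
Editor's sketch (not part of the commit): the rewritten doExecute above fans out one request per local index plus one per remote cluster, collects partial results in a synchronized list, and lets whichever callback exhausts the CountDown perform the single final merge. A self-contained stand-in for that fan-in pattern, using AtomicInteger in place of Elasticsearch's CountDown helper, could look like this:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

class FieldCapsFanIn {
    private final List<String> indexResponses = Collections.synchronizedList(new ArrayList<>());
    private final AtomicInteger remaining;

    FieldCapsFanIn(int expectedResponses) {
        this.remaining = new AtomicInteger(expectedResponses);
    }

    void onIndexResponse(String partialResult) {
        indexResponses.add(partialResult);
        maybeFinish();
    }

    void onIndexFailure(Exception e) {
        // a failure still counts towards completion, mirroring the TODO in the listener above
        maybeFinish();
    }

    private void maybeFinish() {
        if (remaining.decrementAndGet() == 0) {       // only the last callback performs the merge
            System.out.println("merging " + indexResponses.size() + " partial responses");
        }
    }
}

Note that remote clusters are asked for unmerged per-index results (setMergeResults(false)), so the coordinating node can fold local and remote partials through the same merge path.
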
@@ -41,34 +41,19 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-public class TransportFieldCapabilitiesIndexAction
-    extends TransportSingleShardAction<FieldCapabilitiesIndexRequest,
+public class TransportFieldCapabilitiesIndexAction extends TransportSingleShardAction<FieldCapabilitiesIndexRequest,
         FieldCapabilitiesIndexResponse> {
 
     private static final String ACTION_NAME = FieldCapabilitiesAction.NAME + "[index]";
 
-    protected final ClusterService clusterService;
     private final IndicesService indicesService;
 
     @Inject
-    public TransportFieldCapabilitiesIndexAction(Settings settings,
-                                                 ClusterService clusterService,
-                                                 TransportService transportService,
-                                                 IndicesService indicesService,
-                                                 ThreadPool threadPool,
-                                                 ActionFilters actionFilters,
-                                                 IndexNameExpressionResolver
-                                                     indexNameExpressionResolver) {
-        super(settings,
-            ACTION_NAME,
-            threadPool,
-            clusterService,
-            transportService,
-            actionFilters,
-            indexNameExpressionResolver,
-            FieldCapabilitiesIndexRequest::new,
-            ThreadPool.Names.MANAGEMENT);
-        this.clusterService = clusterService;
+    public TransportFieldCapabilitiesIndexAction(Settings settings, ClusterService clusterService, TransportService transportService,
+                                                 IndicesService indicesService, ThreadPool threadPool, ActionFilters actionFilters,
+                                                 IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
+            FieldCapabilitiesIndexRequest::new, ThreadPool.Names.MANAGEMENT);
         this.indicesService = indicesService;
     }
 
@@ -86,11 +71,8 @@ public class TransportFieldCapabilitiesIndexAction
     }
 
     @Override
-    protected FieldCapabilitiesIndexResponse shardOperation(
-        final FieldCapabilitiesIndexRequest request,
-        ShardId shardId) {
-        MapperService mapperService =
-            indicesService.indexServiceSafe(shardId.getIndex()).mapperService();
+    protected FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesIndexRequest request, ShardId shardId) {
+        MapperService mapperService = indicesService.indexServiceSafe(shardId.getIndex()).mapperService();
         Set<String> fieldNames = new HashSet<>();
         for (String field : request.fields()) {
             fieldNames.addAll(mapperService.simpleMatchToIndexNames(field));
@@ -98,11 +80,10 @@ public class TransportFieldCapabilitiesIndexAction
         Map<String, FieldCapabilities> responseMap = new HashMap<>();
         for (String field : fieldNames) {
             MappedFieldType ft = mapperService.fullName(field);
-            FieldCapabilities fieldCap = new FieldCapabilities(field,
-                ft.typeName(),
-                ft.isSearchable(),
-                ft.isAggregatable());
-            responseMap.put(field, fieldCap);
+            if (ft != null) {
+                FieldCapabilities fieldCap = new FieldCapabilities(field, ft.typeName(), ft.isSearchable(), ft.isAggregatable());
+                responseMap.put(field, fieldCap);
+            }
         }
         return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), responseMap);
     }
@@ -113,9 +94,7 @@ public class TransportFieldCapabilitiesIndexAction
     }
 
     @Override
-    protected ClusterBlockException checkRequestBlock(ClusterState state,
-                                                      InternalRequest request) {
-        return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ,
-            request.concreteIndex());
+    protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) {
+        return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, request.concreteIndex());
     }
 }
@@ -178,35 +178,17 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
         final SearchTimeProvider timeProvider =
             new SearchTimeProvider(absoluteStartMillis, relativeStartNanos, System::nanoTime);
 
-        final OriginalIndices localIndices;
-        final Map<String, OriginalIndices> remoteClusterIndices;
-        final ClusterState clusterState = clusterService.state();
-        if (remoteClusterService.isCrossClusterSearchEnabled()) {
-            final Map<String, List<String>> groupedIndices = remoteClusterService.groupClusterIndices(searchRequest.indices(),
-                // empty string is not allowed
-                idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
-            List<String> remove = groupedIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
-            String[] indices = remove == null ? Strings.EMPTY_ARRAY : remove.toArray(new String[remove.size()]);
-            localIndices = new OriginalIndices(indices, searchRequest.indicesOptions());
-            Map<String, OriginalIndices> originalIndicesMap = new HashMap<>();
-            for (Map.Entry<String, List<String>> entry : groupedIndices.entrySet()) {
-                String clusterAlias = entry.getKey();
-                List<String> originalIndices = entry.getValue();
-                originalIndicesMap.put(clusterAlias,
-                    new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), searchRequest.indicesOptions()));
-            }
-            remoteClusterIndices = Collections.unmodifiableMap(originalIndicesMap);
-        } else {
-            remoteClusterIndices = Collections.emptyMap();
-            localIndices = new OriginalIndices(searchRequest);
-        }
 
+        final ClusterState clusterState = clusterService.state();
+        final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(searchRequest.indicesOptions(),
+            searchRequest.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
+        OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
         if (remoteClusterIndices.isEmpty()) {
             executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(),
                 (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener);
         } else {
-            remoteClusterService.collectSearchShards(searchRequest, remoteClusterIndices,
-                ActionListener.wrap((searchShardsResponses) -> {
+            remoteClusterService.collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(), searchRequest.routing(),
+                remoteClusterIndices, ActionListener.wrap((searchShardsResponses) -> {
                     List<SearchShardIterator> remoteShardIterators = new ArrayList<>();
                     Map<String, AliasFilter> remoteAliasFilters = new HashMap<>();
                     BiFunction<String, String, DiscoveryNode> clusterNodeLookup = processRemoteShards(searchShardsResponses,
@@ -230,28 +212,31 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
             for (DiscoveryNode remoteNode : searchShardsResponse.getNodes()) {
                 idToDiscoveryNode.put(remoteNode.getId(), remoteNode);
             }
-            Map<String, AliasFilter> indicesAndFilters = searchShardsResponse.getIndicesAndFilters();
+            final Map<String, AliasFilter> indicesAndFilters = searchShardsResponse.getIndicesAndFilters();
             for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) {
                 //add the cluster name to the remote index names for indices disambiguation
                 //this ends up in the hits returned with the search response
                 ShardId shardId = clusterSearchShardsGroup.getShardId();
                 Index remoteIndex = shardId.getIndex();
-                Index index = new Index(clusterAlias + RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR + remoteIndex.getName(),
+                Index index = new Index(RemoteClusterAware.buildRemoteIndexName(clusterAlias, remoteIndex.getName()),
                     remoteIndex.getUUID());
-                OriginalIndices originalIndices = remoteIndicesByCluster.get(clusterAlias);
-                assert originalIndices != null;
-                SearchShardIterator shardIterator = new SearchShardIterator(clusterAlias, new ShardId(index, shardId.getId()),
-                    Arrays.asList(clusterSearchShardsGroup.getShards()), originalIndices);
-                remoteShardIterators.add(shardIterator);
-                AliasFilter aliasFilter;
+                final AliasFilter aliasFilter;
                 if (indicesAndFilters == null) {
-                    aliasFilter = new AliasFilter(null, Strings.EMPTY_ARRAY);
+                    aliasFilter = AliasFilter.EMPTY;
                 } else {
                     aliasFilter = indicesAndFilters.get(shardId.getIndexName());
-                    assert aliasFilter != null;
+                    assert aliasFilter != null : "alias filter must not be null for index: " + shardId.getIndex();
                 }
+                String[] aliases = aliasFilter.getAliases();
+                String[] finalIndices = aliases.length == 0 ? new String[] {shardId.getIndexName()} : aliases;
                 // here we have to map the filters to the UUID since from now on we use the uuid for the lookup
                 aliasFilterMap.put(remoteIndex.getUUID(), aliasFilter);
+                final OriginalIndices originalIndices = remoteIndicesByCluster.get(clusterAlias);
+                assert originalIndices != null : "original indices are null for clusterAlias: " + clusterAlias;
+                SearchShardIterator shardIterator = new SearchShardIterator(clusterAlias, new ShardId(index, shardId.getId()),
+                    Arrays.asList(clusterSearchShardsGroup.getShards()), new OriginalIndices(finalIndices,
+                    originalIndices.indicesOptions()));
+                remoteShardIterators.add(shardIterator);
             }
         }
         return (clusterAlias, nodeId) -> {
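
Editor's sketch (not part of the commit): both the search and the field_caps paths above now rely on RemoteClusterService.groupIndices to split expressions like "eu:logs-*" into per-cluster buckets, with everything else kept under a local key that the caller removes. A rough standalone illustration of that grouping idea (the real implementation also consults the configured cluster aliases and existing local indices, which this sketch ignores):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class GroupIndicesDemo {
    static final String LOCAL = "(local)";

    static Map<String, List<String>> group(String... expressions) {
        Map<String, List<String>> grouped = new HashMap<>();
        for (String expression : expressions) {
            int separator = expression.indexOf(':');
            String cluster = separator < 0 ? LOCAL : expression.substring(0, separator);
            String index = separator < 0 ? expression : expression.substring(separator + 1);
            grouped.computeIfAbsent(cluster, key -> new ArrayList<>()).add(index);
        }
        return grouped;
    }
}

For example, group("logs-*", "eu:logs-*") yields {(local)=[logs-*], eu=[logs-*]}, after which the caller removes the local entry and fans the remaining groups out to their clusters.
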
@@ -58,9 +58,7 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.io.stream.NamedWriteable;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
-import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.io.stream.Writeable.Reader;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
@@ -92,19 +90,22 @@ public class ClusterModule extends AbstractModule {
     public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING =
         new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope);
 
-    private final Settings settings;
     private final ClusterService clusterService;
     private final IndexNameExpressionResolver indexNameExpressionResolver;
+    private final AllocationDeciders allocationDeciders;
+    private final AllocationService allocationService;
     // pkg private for tests
-    final Collection<AllocationDecider> allocationDeciders;
+    final Collection<AllocationDecider> deciderList;
     final ShardsAllocator shardsAllocator;
 
-    public ClusterModule(Settings settings, ClusterService clusterService, List<ClusterPlugin> clusterPlugins) {
-        this.settings = settings;
-        this.allocationDeciders = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins);
+    public ClusterModule(Settings settings, ClusterService clusterService, List<ClusterPlugin> clusterPlugins,
+                         ClusterInfoService clusterInfoService) {
+        this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins);
+        this.allocationDeciders = new AllocationDeciders(settings, deciderList);
         this.shardsAllocator = createShardsAllocator(settings, clusterService.getClusterSettings(), clusterPlugins);
         this.clusterService = clusterService;
-        indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
+        this.indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
+        this.allocationService = new AllocationService(settings, allocationDeciders, shardsAllocator, clusterInfoService);
     }
 
 
@@ -213,10 +214,14 @@ public class ClusterModule extends AbstractModule {
             "ShardsAllocator factory for [" + allocatorName + "] returned null");
     }
 
+    public AllocationService getAllocationService() {
+        return allocationService;
+    }
+
     @Override
     protected void configure() {
         bind(GatewayAllocator.class).asEagerSingleton();
-        bind(AllocationService.class).asEagerSingleton();
+        bind(AllocationService.class).toInstance(allocationService);
         bind(ClusterService.class).toInstance(clusterService);
         bind(NodeConnectionsService.class).asEagerSingleton();
         bind(MetaDataCreateIndexService.class).asEagerSingleton();
@@ -233,7 +238,7 @@ public class ClusterModule extends AbstractModule {
         bind(NodeMappingRefreshAction.class).asEagerSingleton();
         bind(MappingUpdatedAction.class).asEagerSingleton();
         bind(TaskResultsService.class).asEagerSingleton();
-        bind(AllocationDeciders.class).toInstance(new AllocationDeciders(settings, allocationDeciders));
+        bind(AllocationDeciders.class).toInstance(allocationDeciders);
         bind(ShardsAllocator.class).toInstance(shardsAllocator);
     }
 }
@@ -48,6 +48,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
+import java.util.function.Predicate;
 import java.util.stream.Collectors;
 
 public class IndexNameExpressionResolver extends AbstractComponent {
@@ -268,8 +269,19 @@ public class IndexNameExpressionResolver extends AbstractComponent {
      * the index itself - null is returned. Returns <tt>null</tt> if no filtering is required.
      */
     public String[] filteringAliases(ClusterState state, String index, String... expressions) {
+        return indexAliases(state, index, AliasMetaData::filteringRequired, false, expressions);
+    }
+
+    /**
+     * Iterates through the list of indices and selects the effective list of required aliases for the
+     * given index.
+     * <p>Only aliases where the given predicate tests successfully are returned. If the indices list contains a non-required reference to
+     * the index itself - null is returned. Returns <tt>null</tt> if no filtering is required.
+     */
+    public String[] indexAliases(ClusterState state, String index, Predicate<AliasMetaData> requiredAlias, boolean skipIdentity,
+                                 String... expressions) {
         // expand the aliases wildcard
-        List<String> resolvedExpressions = expressions != null ? Arrays.asList(expressions) : Collections.<String>emptyList();
+        List<String> resolvedExpressions = expressions != null ? Arrays.asList(expressions) : Collections.emptyList();
         Context context = new Context(state, IndicesOptions.lenientExpandOpen(), true);
         for (ExpressionResolver expressionResolver : expressionResolvers) {
             resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions);
@@ -278,54 +290,50 @@ public class IndexNameExpressionResolver extends AbstractComponent {
         if (isAllIndices(resolvedExpressions)) {
             return null;
         }
+        final IndexMetaData indexMetaData = state.metaData().getIndices().get(index);
+        if (indexMetaData == null) {
+            // Shouldn't happen
+            throw new IndexNotFoundException(index);
+        }
         // optimize for the most common single index/alias scenario
         if (resolvedExpressions.size() == 1) {
             String alias = resolvedExpressions.get(0);
-            IndexMetaData indexMetaData = state.metaData().getIndices().get(index);
-            if (indexMetaData == null) {
-                // Shouldn't happen
-                throw new IndexNotFoundException(index);
-            }
             AliasMetaData aliasMetaData = indexMetaData.getAliases().get(alias);
-            boolean filteringRequired = aliasMetaData != null && aliasMetaData.filteringRequired();
-            if (!filteringRequired) {
+            if (aliasMetaData == null || requiredAlias.test(aliasMetaData) == false) {
                 return null;
             }
             return new String[]{alias};
         }
-        List<String> filteringAliases = null;
+        List<String> aliases = null;
         for (String alias : resolvedExpressions) {
             if (alias.equals(index)) {
-                return null;
+                if (skipIdentity) {
+                    continue;
+                } else {
+                    return null;
+                }
             }
 
-            IndexMetaData indexMetaData = state.metaData().getIndices().get(index);
-            if (indexMetaData == null) {
-                // Shouldn't happen
-                throw new IndexNotFoundException(index);
-            }
-
             AliasMetaData aliasMetaData = indexMetaData.getAliases().get(alias);
             // Check that this is an alias for the current index
             // Otherwise - skip it
             if (aliasMetaData != null) {
-                boolean filteringRequired = aliasMetaData.filteringRequired();
-                if (filteringRequired) {
-                    // If filtering required - add it to the list of filters
-                    if (filteringAliases == null) {
-                        filteringAliases = new ArrayList<>();
+                if (requiredAlias.test(aliasMetaData)) {
+                    // If required - add it to the list of aliases
+                    if (aliases == null) {
+                        aliases = new ArrayList<>();
                     }
-                    filteringAliases.add(alias);
+                    aliases.add(alias);
                 } else {
-                    // If not, we have a non filtering alias for this index - no filtering needed
+                    // If not, we have a non required alias for this index - no futher checking needed
                     return null;
                 }
             }
         }
-        if (filteringAliases == null) {
+        if (aliases == null) {
             return null;
         }
-        return filteringAliases.toArray(new String[filteringAliases.size()]);
+        return aliases.toArray(new String[aliases.size()]);
     }
 
     /**
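
Editor's sketch (not part of the commit): the refactor above turns the filtering-specific loop into a predicate-driven selection, so filteringAliases is now just indexAliases(..., AliasMetaData::filteringRequired, false, ...). Reduced to a standalone shape, with a boolean flag standing in for AliasMetaData, the selection logic looks roughly like this:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;

class RequiredAliasDemo {
    // aliasMetadata maps alias name -> "has a filter?" flag, standing in for AliasMetaData
    static String[] indexAliases(Map<String, Boolean> aliasMetadata, Predicate<Boolean> requiredAlias) {
        List<String> aliases = null;
        for (Map.Entry<String, Boolean> alias : aliasMetadata.entrySet()) {
            if (requiredAlias.test(alias.getValue())) {
                if (aliases == null) {
                    aliases = new ArrayList<>();
                }
                aliases.add(alias.getKey());
            } else {
                // a single non-required alias means no extra work is needed for this index: bail out
                return null;
            }
        }
        return aliases == null ? null : aliases.toArray(new String[aliases.size()]);
    }
}

The old behaviour corresponds to passing hasFilter -> hasFilter as the predicate; other callers can now select aliases by a different criterion without duplicating the loop.
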
@@ -238,11 +238,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
         int rolesSize = in.readVInt();
         this.roles = EnumSet.noneOf(Role.class);
         for (int i = 0; i < rolesSize; i++) {
-            int ordinal = in.readVInt();
-            if (ordinal < 0 || ordinal >= Role.values().length) {
-                throw new IOException("Unknown Role ordinal [" + ordinal + "]");
-            }
-            this.roles.add(Role.values()[ordinal]);
+            this.roles.add(in.readEnum(Role.class));
         }
         this.version = Version.readVersion(in);
     }
@@ -262,7 +258,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
         }
         out.writeVInt(roles.size());
         for (Role role : roles) {
-            out.writeVInt(role.ordinal());
+            out.writeEnum(role);
         }
         Version.writeVersion(version, out);
     }
@@ -37,7 +37,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.gateway.GatewayAllocator;
 
@@ -61,20 +60,29 @@ import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NOD
 public class AllocationService extends AbstractComponent {
 
     private final AllocationDeciders allocationDeciders;
-    private final GatewayAllocator gatewayAllocator;
+    private GatewayAllocator gatewayAllocator;
     private final ShardsAllocator shardsAllocator;
     private final ClusterInfoService clusterInfoService;
 
-    @Inject
-    public AllocationService(Settings settings, AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator,
+    public AllocationService(Settings settings, AllocationDeciders allocationDeciders,
+                             GatewayAllocator gatewayAllocator,
+                             ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) {
+        this(settings, allocationDeciders, shardsAllocator, clusterInfoService);
+        setGatewayAllocator(gatewayAllocator);
+    }
+
+    public AllocationService(Settings settings, AllocationDeciders allocationDeciders,
                              ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) {
         super(settings);
         this.allocationDeciders = allocationDeciders;
-        this.gatewayAllocator = gatewayAllocator;
         this.shardsAllocator = shardsAllocator;
         this.clusterInfoService = clusterInfoService;
     }
 
+    public void setGatewayAllocator(GatewayAllocator gatewayAllocator) {
+        this.gatewayAllocator = gatewayAllocator;
+    }
+
     /**
      * Applies the started shards. Note, only initializing ShardRouting instances that exist in the routing table should be
      * provided as parameter and no duplicates should be contained.
@@ -44,16 +44,12 @@ public enum ShapeRelation implements Writeable {
     }
 
     public static ShapeRelation readFromStream(StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown ShapeRelation ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(ShapeRelation.class);
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(ordinal());
+        out.writeEnum(this);
     }
 
     public static ShapeRelation getRelationByName(String name) {
@@ -40,16 +40,12 @@ public enum SpatialStrategy implements Writeable {
     }
 
     public static SpatialStrategy readFromStream(StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown SpatialStrategy ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(SpatialStrategy.class);
    }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(ordinal());
+        out.writeEnum(this);
     }
 
     public static SpatialStrategy fromString(String strategyName) {
@@ -901,6 +901,18 @@ public abstract class StreamInput extends InputStream {
         return builder;
     }
 
+    /**
+     * Reads an enum with type E that was serialized based on the value of it's ordinal
+     */
+    public <E extends Enum<E>> E readEnum(Class<E> enumClass) throws IOException {
+        int ordinal = readVInt();
+        E[] values = enumClass.getEnumConstants();
+        if (ordinal < 0 || ordinal >= values.length) {
+            throw new IOException("Unknown " + enumClass.getSimpleName() + " ordinal [" + ordinal + "]");
+        }
+        return values[ordinal];
+    }
+
     public static StreamInput wrap(byte[] bytes) {
         return wrap(bytes, 0, bytes.length);
     }
@@ -936,4 +936,11 @@ public abstract class StreamOutput extends OutputStream {
         }
     }
 
+    /**
+     * Writes an enum with type E that by serialized it based on it's ordinal value
+     */
+    public <E extends Enum<E>> void writeEnum(E enumValue) throws IOException {
+        writeVInt(enumValue.ordinal());
+    }
+
 }
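
Editor's sketch (not part of the commit): the new readEnum/writeEnum pair above centralises the ordinal-based enum round trip, including the bounds check that every call site (DiscoveryNode roles, ShapeRelation, SpatialStrategy, the score-mode enums below) previously repeated by hand. A self-contained analogue with plain java.io instead of ES stream classes:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class EnumStreamDemo {
    enum Role { MASTER, DATA, INGEST }

    static <E extends Enum<E>> void writeEnum(DataOutputStream out, E value) throws IOException {
        out.writeInt(value.ordinal());            // plain int here, where ES writes a variable-length vint
    }

    static <E extends Enum<E>> E readEnum(DataInputStream in, Class<E> enumClass) throws IOException {
        int ordinal = in.readInt();
        E[] values = enumClass.getEnumConstants();
        if (ordinal < 0 || ordinal >= values.length) {
            // a corrupted or incompatible ordinal fails loudly instead of mapping to the wrong constant
            throw new IOException("Unknown " + enumClass.getSimpleName() + " ordinal [" + ordinal + "]");
        }
        return values[ordinal];
    }
}

Writing Role.DATA and reading it back with readEnum(in, Role.class) reproduces the same constant as long as the enum's constant order is not changed between writer and reader, which is the implicit contract of ordinal-based serialization.
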
@@ -21,7 +21,6 @@ package org.elasticsearch.common.lucene.search;
 
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.ExtendedCommonTermsQuery;
-import org.apache.lucene.search.AutomatonQuery;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
@@ -31,9 +30,6 @@ import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.PrefixQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.automaton.Automata;
-import org.apache.lucene.util.automaton.Automaton;
-import org.apache.lucene.util.automaton.Operations;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.index.mapper.TypeFieldMapper;
 
@@ -42,29 +38,6 @@ import java.util.regex.Pattern;
 
 public class Queries {
 
-    private static final Automaton NON_NESTED_TYPE_AUTOMATON;
-    static {
-        Automaton nestedTypeAutomaton = Operations.concatenate(
-            Automata.makeString("__"),
-            Automata.makeAnyString());
-        NON_NESTED_TYPE_AUTOMATON = Operations.complement(nestedTypeAutomaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
-    }
-
-    // We use a custom class rather than AutomatonQuery directly in order to
-    // have a better toString
-    private static class NonNestedQuery extends AutomatonQuery {
-
-        NonNestedQuery() {
-            super(new Term(TypeFieldMapper.NAME), NON_NESTED_TYPE_AUTOMATON);
-        }
-
-        @Override
-        public String toString(String field) {
-            return "_type:[^_].*";
-        }
-
-    }
-
     public static Query newMatchAllQuery() {
         return new MatchAllDocsQuery();
     }
@@ -79,9 +52,11 @@ public class Queries {
     }
 
     public static Query newNonNestedFilter() {
-        // we use this automaton query rather than a negation of newNestedFilter
-        // since purely negative queries against high-cardinality clauses are costly
-        return new NonNestedQuery();
+        // TODO: this is slow, make it a positive query
+        return new BooleanQuery.Builder()
+            .add(new MatchAllDocsQuery(), Occur.FILTER)
+            .add(newNestedFilter(), Occur.MUST_NOT)
+            .build();
     }
 
     public static BooleanQuery filtered(@Nullable Query query, @Nullable Query filter) {
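
Editor's sketch (not part of the commit): the hunk above drops the custom automaton over _type and expresses "not nested" as match-all minus the nested filter. A minimal Lucene-only illustration of that query shape; the field name and "__" prefix mirror the nested-type convention visible above and the PrefixQuery stand-in for newNestedFilter() is an assumption of this demo:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;

class NonNestedFilterDemo {
    static Query nestedFilter() {
        return new PrefixQuery(new Term("_type", "__"));       // nested documents get a "__" type prefix
    }

    static Query nonNestedFilter() {
        return new BooleanQuery.Builder()
            .add(new MatchAllDocsQuery(), Occur.FILTER)         // constant-score base clause
            .add(nestedFilter(), Occur.MUST_NOT)                // subtract the nested documents
            .build();
    }
}

The TODO in the commit acknowledges the trade-off: the purely negative clause is simpler but can be slower than a positive query, which is why the change leaves a marker to revisit it.
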
@@ -143,15 +143,11 @@ public enum CombineFunction implements Writeable {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(this.ordinal());
+        out.writeEnum(this);
     }
 
     public static CombineFunction readFromStream(StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown CombineFunction ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(CombineFunction.class);
     }
 
     public static CombineFunction fromString(String combineFunction) {
@@ -191,15 +191,11 @@ public class FieldValueFactorFunction extends ScoreFunction {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(this.ordinal());
+        out.writeEnum(this);
     }
 
     public static Modifier readFromStream(StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown Modifier ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(Modifier.class);
     }
 
     @Override
@@ -81,15 +81,11 @@ public class FiltersFunctionScoreQuery extends Query {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(this.ordinal());
+        out.writeEnum(this);
     }
 
     public static ScoreMode readFromStream(StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown ScoreMode ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(ScoreMode.class);
     }
 
     public static ScoreMode fromString(String scoreMode) {
@@ -84,5 +84,4 @@ public class ArrayUtils {
         System.arraycopy(other, 0, target, one.length, other.length);
         return target;
     }
-
 }
@@ -23,7 +23,6 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -37,12 +36,6 @@ import java.io.IOException;
  */
 public interface Discovery extends LifecycleComponent {
 
-    /**
-     * Another hack to solve dep injection problem..., note, this will be called before
-     * any start is called.
-     */
-    void setAllocationService(AllocationService allocationService);
-
     /**
      * Publish all the changes to the cluster from the master (can be called just by the master). The publish
      * process should apply this state to the master as well!
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.discovery;
 
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.service.ClusterApplier;
 import org.elasticsearch.cluster.service.MasterService;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -58,7 +59,8 @@ public class DiscoveryModule {
 
     public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService,
                            NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, MasterService masterService,
-                           ClusterApplier clusterApplier, ClusterSettings clusterSettings, List<DiscoveryPlugin> plugins) {
+                           ClusterApplier clusterApplier, ClusterSettings clusterSettings, List<DiscoveryPlugin> plugins,
+                           AllocationService allocationService) {
         final UnicastHostsProvider hostsProvider;
 
         Map<String, Supplier<UnicastHostsProvider>> hostProviders = new HashMap<>();
@@ -83,12 +85,12 @@ public class DiscoveryModule {
         Map<String, Supplier<Discovery>> discoveryTypes = new HashMap<>();
         discoveryTypes.put("zen",
             () -> new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier,
-                clusterSettings, hostsProvider));
+                clusterSettings, hostsProvider, allocationService));
         discoveryTypes.put("tribe", () -> new TribeDiscovery(settings, transportService, clusterApplier));
         discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, clusterApplier));
         for (DiscoveryPlugin plugin : plugins) {
             plugin.getDiscoveryTypes(threadPool, transportService, namedWriteableRegistry,
                 masterService, clusterApplier, clusterSettings, hostsProvider).entrySet().forEach(entry -> {
-                masterService, clusterApplier, clusterSettings, hostsProvider).entrySet().forEach(entry -> {
+                masterService, clusterApplier, clusterSettings, hostsProvider, allocationService).entrySet().forEach(entry -> {
                 if (discoveryTypes.put(entry.getKey(), entry.getValue()) != null) {
                     throw new IllegalArgumentException("Cannot register discovery type [" + entry.getKey() + "] twice");
                 }
|
@ -27,7 +27,6 @@ import org.elasticsearch.cluster.ClusterStateTaskListener;
|
|||||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||||
import org.elasticsearch.cluster.routing.allocation.AllocationService;
|
|
||||||
import org.elasticsearch.cluster.service.ClusterApplier;
|
import org.elasticsearch.cluster.service.ClusterApplier;
|
||||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
@ -59,11 +58,6 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D
|
|||||||
this.clusterApplier = clusterApplier;
|
this.clusterApplier = clusterApplier;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public void setAllocationService(final AllocationService allocationService) {
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public synchronized void publish(final ClusterChangedEvent event,
|
public synchronized void publish(final ClusterChangedEvent event,
|
||||||
final AckListener ackListener) {
|
final AckListener ackListener) {
|
||||||
|
@ -109,7 +109,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
|||||||
|
|
||||||
private final TransportService transportService;
|
private final TransportService transportService;
|
||||||
private final MasterService masterService;
|
private final MasterService masterService;
|
||||||
private AllocationService allocationService;
|
|
||||||
private final ClusterName clusterName;
|
private final ClusterName clusterName;
|
||||||
private final DiscoverySettings discoverySettings;
|
private final DiscoverySettings discoverySettings;
|
||||||
protected final ZenPing zenPing; // protected to allow tests access
|
protected final ZenPing zenPing; // protected to allow tests access
|
||||||
@ -140,9 +139,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
|||||||
|
|
||||||
private final JoinThreadControl joinThreadControl;
|
private final JoinThreadControl joinThreadControl;
|
||||||
|
|
||||||
// must initialized in doStart(), when we have the allocationService set
|
private final NodeJoinController nodeJoinController;
|
||||||
private volatile NodeJoinController nodeJoinController;
|
private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;
|
||||||
private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;
|
|
||||||
|
|
||||||
private final ClusterApplier clusterApplier;
|
private final ClusterApplier clusterApplier;
|
||||||
private final AtomicReference<ClusterState> state; // last committed cluster state
|
private final AtomicReference<ClusterState> state; // last committed cluster state
|
||||||
@ -151,7 +149,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
|||||||
|
|
||||||
public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService,
|
public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService,
|
||||||
NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier,
|
NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier,
|
||||||
ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider) {
|
ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider, AllocationService allocationService) {
|
||||||
super(settings);
|
super(settings);
|
||||||
this.masterService = masterService;
|
this.masterService = masterService;
|
||||||
this.clusterApplier = clusterApplier;
|
this.clusterApplier = clusterApplier;
|
||||||
@ -213,6 +211,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
|||||||
this.membership = new MembershipAction(settings, transportService, new MembershipListener());
|
this.membership = new MembershipAction(settings, transportService, new MembershipListener());
|
||||||
this.joinThreadControl = new JoinThreadControl();
|
this.joinThreadControl = new JoinThreadControl();
|
||||||
|
|
||||||
|
this.nodeJoinController = new NodeJoinController(masterService, allocationService, electMaster, settings);
|
||||||
|
this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::submitRejoin, logger);
|
||||||
|
|
||||||
transportService.registerRequestHandler(
|
transportService.registerRequestHandler(
|
||||||
DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler());
|
DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest::new, ThreadPool.Names.SAME, new RejoinClusterRequestHandler());
|
||||||
}
|
}
|
||||||
@ -223,11 +224,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
|||||||
return new UnicastZenPing(settings, threadPool, transportService, hostsProvider);
|
return new UnicastZenPing(settings, threadPool, transportService, hostsProvider);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
|
||||||
public void setAllocationService(AllocationService allocationService) {
|
|
||||||
this.allocationService = allocationService;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected void doStart() {
|
protected void doStart() {
|
||||||
DiscoveryNode localNode = transportService.getLocalNode();
|
DiscoveryNode localNode = transportService.getLocalNode();
|
||||||
@ -239,8 +235,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
|
|||||||
joinThreadControl.start();
|
joinThreadControl.start();
|
||||||
}
|
}
|
||||||
zenPing.start(this);
|
zenPing.start(this);
|
||||||
this.nodeJoinController = new NodeJoinController(masterService, allocationService, electMaster, settings);
|
|
||||||
this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::submitRejoin, logger);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -20,7 +20,6 @@
|
|||||||
package org.elasticsearch.env;
|
package org.elasticsearch.env;
|
||||||
|
|
||||||
import org.apache.lucene.util.Constants;
|
import org.apache.lucene.util.Constants;
|
||||||
import org.apache.lucene.util.IOUtils;
|
|
||||||
import org.elasticsearch.common.SuppressForbidden;
|
import org.elasticsearch.common.SuppressForbidden;
|
||||||
import org.elasticsearch.common.io.PathUtils;
|
import org.elasticsearch.common.io.PathUtils;
|
||||||
|
|
||||||
@ -43,24 +42,15 @@ import java.util.List;
|
|||||||
class ESFileStore extends FileStore {
|
class ESFileStore extends FileStore {
|
||||||
/** Underlying filestore */
|
/** Underlying filestore */
|
||||||
final FileStore in;
|
final FileStore in;
|
||||||
/** Cached result of Lucene's {@code IOUtils.spins} on path. */
|
private int majorDeviceNumber;
|
||||||
final Boolean spins;
|
private int minorDeviceNumber;
|
||||||
int majorDeviceNumber;
|
|
||||||
int minorDeviceNumber;
|
|
||||||
|
|
||||||
@SuppressForbidden(reason = "tries to determine if disk is spinning")
|
@SuppressForbidden(reason = "tries to determine if disk is spinning")
|
||||||
// TODO: move PathUtils to be package-private here instead of
|
// TODO: move PathUtils to be package-private here instead of
|
||||||
// public+forbidden api!
|
// public+forbidden api!
|
||||||
ESFileStore(FileStore in) {
|
ESFileStore(final FileStore in) {
|
||||||
this.in = in;
|
this.in = in;
|
||||||
Boolean spins;
|
|
||||||
// Lucene's IOUtils.spins only works on Linux today:
|
|
||||||
if (Constants.LINUX) {
|
if (Constants.LINUX) {
|
||||||
try {
|
|
||||||
spins = IOUtils.spins(PathUtils.get(getMountPointLinux(in)));
|
|
||||||
} catch (Exception e) {
|
|
||||||
spins = null;
|
|
||||||
}
|
|
||||||
try {
|
try {
|
||||||
final List<String> lines = Files.readAllLines(PathUtils.get("/proc/self/mountinfo"));
|
final List<String> lines = Files.readAllLines(PathUtils.get("/proc/self/mountinfo"));
|
||||||
for (final String line : lines) {
|
for (final String line : lines) {
|
||||||
@ -70,20 +60,21 @@ class ESFileStore extends FileStore {
|
|||||||
final String[] deviceNumbers = fields[2].split(":");
|
final String[] deviceNumbers = fields[2].split(":");
|
||||||
majorDeviceNumber = Integer.parseInt(deviceNumbers[0]);
|
majorDeviceNumber = Integer.parseInt(deviceNumbers[0]);
|
||||||
minorDeviceNumber = Integer.parseInt(deviceNumbers[1]);
|
minorDeviceNumber = Integer.parseInt(deviceNumbers[1]);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} catch (Exception e) {
|
} catch (final Exception e) {
|
||||||
majorDeviceNumber = -1;
|
majorDeviceNumber = -1;
|
||||||
minorDeviceNumber = -1;
|
minorDeviceNumber = -1;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
spins = null;
|
majorDeviceNumber = -1;
|
||||||
|
minorDeviceNumber = -1;
|
||||||
}
|
}
|
||||||
this.spins = spins;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// these are hacks that are not guaranteed
|
// these are hacks that are not guaranteed
|
||||||
private static String getMountPointLinux(FileStore store) {
|
private static String getMountPointLinux(final FileStore store) {
|
||||||
String desc = store.toString();
|
String desc = store.toString();
|
||||||
int index = desc.lastIndexOf(" (");
|
int index = desc.lastIndexOf(" (");
|
||||||
if (index != -1) {
|
if (index != -1) {
|
||||||
@ -92,109 +83,6 @@ class ESFileStore extends FileStore {
|
|||||||
return desc;
|
return desc;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Files.getFileStore(Path) useless here! Don't complain, just try it yourself.
|
|
||||||
*/
|
|
||||||
@SuppressForbidden(reason = "works around the bugs")
|
|
||||||
static FileStore getMatchingFileStore(Path path, FileStore fileStores[]) throws IOException {
|
|
||||||
if (Constants.WINDOWS) {
|
|
||||||
return getFileStoreWindows(path, fileStores);
|
|
||||||
}
|
|
||||||
|
|
||||||
final FileStore store;
|
|
||||||
try {
|
|
||||||
store = Files.getFileStore(path);
|
|
||||||
} catch (IOException unexpected) {
|
|
||||||
// give a better error message if a filestore cannot be retrieved from inside a FreeBSD jail.
|
|
||||||
if (Constants.FREE_BSD) {
|
|
||||||
throw new IOException("Unable to retrieve mount point data for " + path +
|
|
||||||
". If you are running within a jail, set enforce_statfs=1. See jail(8)", unexpected);
|
|
||||||
} else {
|
|
||||||
throw unexpected;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
String mount = getMountPointLinux(store);
|
|
||||||
FileStore sameMountPoint = null;
|
|
||||||
for (FileStore fs : fileStores) {
|
|
||||||
if (mount.equals(getMountPointLinux(fs))) {
|
|
||||||
if (sameMountPoint == null) {
|
|
||||||
sameMountPoint = fs;
|
|
||||||
} else {
|
|
||||||
// more than one filesystem has the same mount point; something is wrong!
|
|
||||||
// fall back to crappy one we got from Files.getFileStore
|
|
||||||
return store;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (sameMountPoint != null) {
|
|
||||||
// ok, we found only one, use it:
|
|
||||||
return sameMountPoint;
|
|
||||||
} else {
|
|
||||||
// fall back to crappy one we got from Files.getFileStore
|
|
||||||
return store;
|
|
||||||
}
|
|
||||||
} catch (Exception e) {
|
|
||||||
// ignore
|
|
||||||
}
|
|
||||||
|
|
||||||
// fall back to crappy one we got from Files.getFileStore
|
|
||||||
return store;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* remove this code and just use getFileStore for windows on java 9
|
|
||||||
* works around https://bugs.openjdk.java.net/browse/JDK-8034057
|
|
||||||
*/
|
|
||||||
@SuppressForbidden(reason = "works around https://bugs.openjdk.java.net/browse/JDK-8034057")
|
|
||||||
static FileStore getFileStoreWindows(Path path, FileStore fileStores[]) throws IOException {
|
|
||||||
assert Constants.WINDOWS;
|
|
||||||
|
|
||||||
try {
|
|
||||||
return Files.getFileStore(path);
|
|
||||||
} catch (FileSystemException possibleBug) {
|
|
||||||
final char driveLetter;
|
|
||||||
// look for a drive letter to see if its the SUBST bug,
|
|
||||||
// it might be some other type of path, like a windows share
|
|
||||||
// if something goes wrong, we just deliver the original exception
|
|
||||||
try {
|
|
||||||
String root = path.toRealPath().getRoot().toString();
|
|
||||||
if (root.length() < 2) {
|
|
||||||
throw new RuntimeException("root isn't a drive letter: " + root);
|
|
||||||
}
|
|
||||||
driveLetter = Character.toLowerCase(root.charAt(0));
|
|
||||||
if (Character.isAlphabetic(driveLetter) == false || root.charAt(1) != ':') {
|
|
||||||
throw new RuntimeException("root isn't a drive letter: " + root);
|
|
||||||
}
|
|
||||||
} catch (Exception checkFailed) {
|
|
||||||
// something went wrong,
|
|
||||||
possibleBug.addSuppressed(checkFailed);
|
|
||||||
throw possibleBug;
|
|
||||||
}
|
|
||||||
|
|
||||||
// we have a drive letter: the hack begins!!!!!!!!
|
|
||||||
try {
|
|
||||||
// we have no choice but to parse toString of all stores and find the matching drive letter
|
|
||||||
for (FileStore store : fileStores) {
|
|
||||||
String toString = store.toString();
|
|
||||||
int length = toString.length();
|
|
||||||
if (length > 3 && toString.endsWith(":)") && toString.charAt(length - 4) == '(') {
|
|
||||||
if (Character.toLowerCase(toString.charAt(length - 3)) == driveLetter) {
|
|
||||||
return store;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
throw new RuntimeException("no filestores matched");
|
|
||||||
} catch (Exception weTried) {
|
|
||||||
IOException newException = new IOException("Unable to retrieve filestore for '" + path + "', tried matching against " + Arrays.toString(fileStores), weTried);
|
|
||||||
newException.addSuppressed(possibleBug);
|
|
||||||
throw newException;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String name() {
|
public String name() {
|
||||||
@ -263,8 +151,6 @@ class ESFileStore extends FileStore {
|
|||||||
@Override
|
@Override
|
||||||
public Object getAttribute(String attribute) throws IOException {
|
public Object getAttribute(String attribute) throws IOException {
|
||||||
switch(attribute) {
|
switch(attribute) {
|
||||||
// for the device
|
|
||||||
case "lucene:spins": return spins;
|
|
||||||
// for the partition
|
// for the partition
|
||||||
case "lucene:major_device_number": return majorDeviceNumber;
|
case "lucene:major_device_number": return majorDeviceNumber;
|
||||||
case "lucene:minor_device_number": return minorDeviceNumber;
|
case "lucene:minor_device_number": return minorDeviceNumber;
|
||||||
|
@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Setting.Property;
|
|||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
import java.io.UncheckedIOException;
|
||||||
import java.net.MalformedURLException;
|
import java.net.MalformedURLException;
|
||||||
import java.net.URISyntaxException;
|
import java.net.URISyntaxException;
|
||||||
import java.net.URL;
|
import java.net.URL;
|
||||||
@ -35,7 +36,9 @@ import java.nio.file.Files;
|
|||||||
import java.nio.file.Path;
|
import java.nio.file.Path;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
|
import java.util.HashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
import java.util.function.Function;
|
import java.util.function.Function;
|
||||||
|
|
||||||
@ -97,22 +100,6 @@ public class Environment {
|
|||||||
/** Path to the temporary file directory used by the JDK */
|
/** Path to the temporary file directory used by the JDK */
|
||||||
private final Path tmpFile = PathUtils.get(System.getProperty("java.io.tmpdir"));
|
private final Path tmpFile = PathUtils.get(System.getProperty("java.io.tmpdir"));
|
||||||
|
|
||||||
/** List of filestores on the system */
|
|
||||||
private static final FileStore[] fileStores;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* We have to do this in clinit instead of init, because ES code is pretty messy,
|
|
||||||
* and makes these environments, throws them away, makes them again, etc.
|
|
||||||
*/
|
|
||||||
static {
|
|
||||||
// gather information about filesystems
|
|
||||||
ArrayList<FileStore> allStores = new ArrayList<>();
|
|
||||||
for (FileStore store : PathUtils.getDefaultFileSystem().getFileStores()) {
|
|
||||||
allStores.add(new ESFileStore(store));
|
|
||||||
}
|
|
||||||
fileStores = allStores.toArray(new ESFileStore[allStores.size()]);
|
|
||||||
}
|
|
||||||
|
|
||||||
public Environment(Settings settings) {
|
public Environment(Settings settings) {
|
||||||
final Path homeFile;
|
final Path homeFile;
|
||||||
if (PATH_HOME_SETTING.exists(settings)) {
|
if (PATH_HOME_SETTING.exists(settings)) {
|
||||||
@ -331,24 +318,8 @@ public class Environment {
|
|||||||
return tmpFile;
|
return tmpFile;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
public static FileStore getFileStore(final Path path) throws IOException {
|
||||||
* Looks up the filestore associated with a Path.
|
return new ESFileStore(Files.getFileStore(path));
|
||||||
* <p>
|
|
||||||
* This is an enhanced version of {@link Files#getFileStore(Path)}:
|
|
||||||
* <ul>
|
|
||||||
* <li>On *nix systems, the store returned for the root filesystem will contain
|
|
||||||
* the actual filesystem type (e.g. {@code ext4}) instead of {@code rootfs}.
|
|
||||||
* <li>On some systems, the custom attribute {@code lucene:spins} is supported
|
|
||||||
* via the {@link FileStore#getAttribute(String)} method.
|
|
||||||
* <li>Only requires the security permissions of {@link Files#getFileStore(Path)},
|
|
||||||
* no permissions to the actual mount point are required.
|
|
||||||
* <li>Exception handling has the same semantics as {@link Files#getFileStore(Path)}.
|
|
||||||
* <li>Works around https://bugs.openjdk.java.net/browse/JDK-8034057.
|
|
||||||
* <li>Gives a better exception when filestore cannot be retrieved from inside a FreeBSD jail.
|
|
||||||
* </ul>
|
|
||||||
*/
|
|
||||||
public static FileStore getFileStore(Path path) throws IOException {
|
|
||||||
return ESFileStore.getMatchingFileStore(path, fileStores);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -94,9 +94,6 @@ public final class NodeEnvironment implements Closeable {
|
|||||||
public final Path indicesPath;
|
public final Path indicesPath;
|
||||||
/** Cached FileStore from path */
|
/** Cached FileStore from path */
|
||||||
public final FileStore fileStore;
|
public final FileStore fileStore;
|
||||||
/** Cached result of Lucene's {@code IOUtils.spins} on path. This is a trilean value: null means we could not determine it (we are
|
|
||||||
* not running on Linux, or we hit an exception trying), True means the device possibly spins and False means it does not. */
|
|
||||||
public final Boolean spins;
|
|
||||||
|
|
||||||
public final int majorDeviceNumber;
|
public final int majorDeviceNumber;
|
||||||
public final int minorDeviceNumber;
|
public final int minorDeviceNumber;
|
||||||
@ -106,11 +103,9 @@ public final class NodeEnvironment implements Closeable {
|
|||||||
this.indicesPath = path.resolve(INDICES_FOLDER);
|
this.indicesPath = path.resolve(INDICES_FOLDER);
|
||||||
this.fileStore = Environment.getFileStore(path);
|
this.fileStore = Environment.getFileStore(path);
|
||||||
if (fileStore.supportsFileAttributeView("lucene")) {
|
if (fileStore.supportsFileAttributeView("lucene")) {
|
||||||
this.spins = (Boolean) fileStore.getAttribute("lucene:spins");
|
|
||||||
this.majorDeviceNumber = (int) fileStore.getAttribute("lucene:major_device_number");
|
this.majorDeviceNumber = (int) fileStore.getAttribute("lucene:major_device_number");
|
||||||
this.minorDeviceNumber = (int) fileStore.getAttribute("lucene:minor_device_number");
|
this.minorDeviceNumber = (int) fileStore.getAttribute("lucene:minor_device_number");
|
||||||
} else {
|
} else {
|
||||||
this.spins = null;
|
|
||||||
this.majorDeviceNumber = -1;
|
this.majorDeviceNumber = -1;
|
||||||
this.minorDeviceNumber = -1;
|
this.minorDeviceNumber = -1;
|
||||||
}
|
}
|
||||||
@ -136,9 +131,13 @@ public final class NodeEnvironment implements Closeable {
|
|||||||
public String toString() {
|
public String toString() {
|
||||||
return "NodePath{" +
|
return "NodePath{" +
|
||||||
"path=" + path +
|
"path=" + path +
|
||||||
", spins=" + spins +
|
", indicesPath=" + indicesPath +
|
||||||
|
", fileStore=" + fileStore +
|
||||||
|
", majorDeviceNumber=" + majorDeviceNumber +
|
||||||
|
", minorDeviceNumber=" + minorDeviceNumber +
|
||||||
'}';
|
'}';
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private final NodePath[] nodePaths;
|
private final NodePath[] nodePaths;
|
||||||
@ -304,15 +303,6 @@ public final class NodeEnvironment implements Closeable {
|
|||||||
for (NodePath nodePath : nodePaths) {
|
for (NodePath nodePath : nodePaths) {
|
||||||
sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath());
|
sb.append('\n').append(" -> ").append(nodePath.path.toAbsolutePath());
|
||||||
|
|
||||||
String spinsDesc;
|
|
||||||
if (nodePath.spins == null) {
|
|
||||||
spinsDesc = "unknown";
|
|
||||||
} else if (nodePath.spins) {
|
|
||||||
spinsDesc = "possibly";
|
|
||||||
} else {
|
|
||||||
spinsDesc = "no";
|
|
||||||
}
|
|
||||||
|
|
||||||
FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
|
FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
|
||||||
sb.append(", free_space [")
|
sb.append(", free_space [")
|
||||||
.append(fsPath.getFree())
|
.append(fsPath.getFree())
|
||||||
@ -320,8 +310,6 @@ public final class NodeEnvironment implements Closeable {
|
|||||||
.append(fsPath.getAvailable())
|
.append(fsPath.getAvailable())
|
||||||
.append("], total_space [")
|
.append("], total_space [")
|
||||||
.append(fsPath.getTotal())
|
.append(fsPath.getTotal())
|
||||||
.append("], spins? [")
|
|
||||||
.append(spinsDesc)
|
|
||||||
.append("], mount [")
|
.append("], mount [")
|
||||||
.append(fsPath.getMount())
|
.append(fsPath.getMount())
|
||||||
.append("], type [")
|
.append("], type [")
|
||||||
@ -332,7 +320,6 @@ public final class NodeEnvironment implements Closeable {
|
|||||||
} else if (logger.isInfoEnabled()) {
|
} else if (logger.isInfoEnabled()) {
|
||||||
FsInfo.Path totFSPath = new FsInfo.Path();
|
FsInfo.Path totFSPath = new FsInfo.Path();
|
||||||
Set<String> allTypes = new HashSet<>();
|
Set<String> allTypes = new HashSet<>();
|
||||||
Set<String> allSpins = new HashSet<>();
|
|
||||||
Set<String> allMounts = new HashSet<>();
|
Set<String> allMounts = new HashSet<>();
|
||||||
for (NodePath nodePath : nodePaths) {
|
for (NodePath nodePath : nodePaths) {
|
||||||
FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
|
FsInfo.Path fsPath = FsProbe.getFSInfo(nodePath);
|
||||||
@ -343,21 +330,13 @@ public final class NodeEnvironment implements Closeable {
|
|||||||
if (type != null) {
|
if (type != null) {
|
||||||
allTypes.add(type);
|
allTypes.add(type);
|
||||||
}
|
}
|
||||||
Boolean spins = fsPath.getSpins();
|
|
||||||
if (spins == null) {
|
|
||||||
allSpins.add("unknown");
|
|
||||||
} else if (spins.booleanValue()) {
|
|
||||||
allSpins.add("possibly");
|
|
||||||
} else {
|
|
||||||
allSpins.add("no");
|
|
||||||
}
|
|
||||||
totFSPath.add(fsPath);
|
totFSPath.add(fsPath);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Just log a 1-line summary:
|
// Just log a 1-line summary:
|
||||||
logger.info("using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], spins? [{}], types [{}]",
|
logger.info("using [{}] data paths, mounts [{}], net usable_space [{}], net total_space [{}], types [{}]",
|
||||||
nodePaths.length, allMounts, totFSPath.getAvailable(), totFSPath.getTotal(), toString(allSpins), toString(allTypes));
|
nodePaths.length, allMounts, totFSPath.getAvailable(), totFSPath.getTotal(), toString(allTypes));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -43,7 +43,7 @@ import java.util.concurrent.ConcurrentMap;
|
|||||||
|
|
||||||
public class GatewayAllocator extends AbstractComponent {
|
public class GatewayAllocator extends AbstractComponent {
|
||||||
|
|
||||||
private RoutingService routingService;
|
private final RoutingService routingService;
|
||||||
|
|
||||||
private final PrimaryShardAllocator primaryShardAllocator;
|
private final PrimaryShardAllocator primaryShardAllocator;
|
||||||
private final ReplicaShardAllocator replicaShardAllocator;
|
private final ReplicaShardAllocator replicaShardAllocator;
|
||||||
@ -52,14 +52,12 @@ public class GatewayAllocator extends AbstractComponent {
|
|||||||
private final ConcurrentMap<ShardId, AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData>> asyncFetchStore = ConcurrentCollections.newConcurrentMap();
|
private final ConcurrentMap<ShardId, AsyncShardFetch<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData>> asyncFetchStore = ConcurrentCollections.newConcurrentMap();
|
||||||
|
|
||||||
@Inject
|
@Inject
|
||||||
public GatewayAllocator(Settings settings, final TransportNodesListGatewayStartedShards startedAction, final TransportNodesListShardStoreMetaData storeAction) {
|
public GatewayAllocator(Settings settings, ClusterService clusterService, RoutingService routingService,
|
||||||
|
TransportNodesListGatewayStartedShards startedAction, TransportNodesListShardStoreMetaData storeAction) {
|
||||||
super(settings);
|
super(settings);
|
||||||
|
this.routingService = routingService;
|
||||||
this.primaryShardAllocator = new InternalPrimaryShardAllocator(settings, startedAction);
|
this.primaryShardAllocator = new InternalPrimaryShardAllocator(settings, startedAction);
|
||||||
this.replicaShardAllocator = new InternalReplicaShardAllocator(settings, storeAction);
|
this.replicaShardAllocator = new InternalReplicaShardAllocator(settings, storeAction);
|
||||||
}
|
|
||||||
|
|
||||||
public void setReallocation(final ClusterService clusterService, final RoutingService routingService) {
|
|
||||||
this.routingService = routingService;
|
|
||||||
clusterService.addStateApplier(event -> {
|
clusterService.addStateApplier(event -> {
|
||||||
boolean cleanCache = false;
|
boolean cleanCache = false;
|
||||||
DiscoveryNode localNode = event.state().nodes().getLocalNode();
|
DiscoveryNode localNode = event.state().nodes().getLocalNode();
|
||||||
@ -79,6 +77,14 @@ public class GatewayAllocator extends AbstractComponent {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// for tests
|
||||||
|
protected GatewayAllocator(Settings settings) {
|
||||||
|
super(settings);
|
||||||
|
this.routingService = null;
|
||||||
|
this.primaryShardAllocator = null;
|
||||||
|
this.replicaShardAllocator = null;
|
||||||
|
}
|
||||||
|
|
||||||
public int getNumberOfInFlightFetch() {
|
public int getNumberOfInFlightFetch() {
|
||||||
int count = 0;
|
int count = 0;
|
||||||
for (AsyncShardFetch<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetch : asyncFetchStarted.values()) {
|
for (AsyncShardFetch<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetch : asyncFetchStarted.values()) {
|
||||||
|
@ -364,13 +364,11 @@ public enum VersionType implements Writeable {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public static VersionType readFromStream(StreamInput in) throws IOException {
|
public static VersionType readFromStream(StreamInput in) throws IOException {
|
||||||
int ordinal = in.readVInt();
|
return in.readEnum(VersionType.class);
|
||||||
assert (ordinal == 0 || ordinal == 1 || ordinal == 2 || ordinal == 3);
|
|
||||||
return VersionType.values()[ordinal];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
out.writeVInt(ordinal());
|
out.writeEnum(this);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -44,26 +44,33 @@ import org.elasticsearch.search.MultiValueMode;
|
|||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.Collection;
|
import java.util.Collection;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
|
import java.util.function.Function;
|
||||||
|
|
||||||
public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData {
|
public class ConstantIndexFieldData extends AbstractIndexOrdinalsFieldData {
|
||||||
|
|
||||||
public static class Builder implements IndexFieldData.Builder {
|
public static class Builder implements IndexFieldData.Builder {
|
||||||
|
|
||||||
|
private final Function<MapperService, String> valueFunction;
|
||||||
|
|
||||||
|
public Builder(Function<MapperService, String> valueFunction) {
|
||||||
|
this.valueFunction = valueFunction;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
|
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
|
||||||
CircuitBreakerService breakerService, MapperService mapperService) {
|
CircuitBreakerService breakerService, MapperService mapperService) {
|
||||||
return new IndexIndexFieldData(indexSettings, fieldType.name());
|
return new ConstantIndexFieldData(indexSettings, fieldType.name(), valueFunction.apply(mapperService));
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private static class IndexAtomicFieldData extends AbstractAtomicOrdinalsFieldData {
|
private static class ConstantAtomicFieldData extends AbstractAtomicOrdinalsFieldData {
|
||||||
|
|
||||||
private final String index;
|
private final String value;
|
||||||
|
|
||||||
IndexAtomicFieldData(String index) {
|
ConstantAtomicFieldData(String value) {
|
||||||
super(DEFAULT_SCRIPT_FUNCTION);
|
super(DEFAULT_SCRIPT_FUNCTION);
|
||||||
this.index = index;
|
this.value = value;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -78,7 +85,7 @@ public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public SortedSetDocValues getOrdinalsValues() {
|
public SortedSetDocValues getOrdinalsValues() {
|
||||||
final BytesRef term = new BytesRef(index);
|
final BytesRef term = new BytesRef(value);
|
||||||
final SortedDocValues sortedValues = new AbstractSortedDocValues() {
|
final SortedDocValues sortedValues = new AbstractSortedDocValues() {
|
||||||
|
|
||||||
private int docID = -1;
|
private int docID = -1;
|
||||||
@ -120,12 +127,12 @@ public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData {
|
|||||||
|
|
||||||
private final AtomicOrdinalsFieldData atomicFieldData;
|
private final AtomicOrdinalsFieldData atomicFieldData;
|
||||||
|
|
||||||
private IndexIndexFieldData(IndexSettings indexSettings, String name) {
|
private ConstantIndexFieldData(IndexSettings indexSettings, String name, String value) {
|
||||||
super(indexSettings, name, null, null,
|
super(indexSettings, name, null, null,
|
||||||
TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY,
|
TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY,
|
||||||
TextFieldMapper.Defaults.FIELDDATA_MAX_FREQUENCY,
|
TextFieldMapper.Defaults.FIELDDATA_MAX_FREQUENCY,
|
||||||
TextFieldMapper.Defaults.FIELDDATA_MIN_SEGMENT_SIZE);
|
TextFieldMapper.Defaults.FIELDDATA_MIN_SEGMENT_SIZE);
|
||||||
atomicFieldData = new IndexAtomicFieldData(index().getName());
|
atomicFieldData = new ConstantAtomicFieldData(value);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -144,7 +151,8 @@ public class IndexIndexFieldData extends AbstractIndexOrdinalsFieldData {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) {
|
public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested,
|
||||||
|
boolean reverse) {
|
||||||
final XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested);
|
final XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested);
|
||||||
return new SortField(getFieldName(), source, reverse);
|
return new SortField(getFieldName(), source, reverse);
|
||||||
}
|
}
|
@ -24,17 +24,16 @@ import org.apache.lucene.search.Query;
|
|||||||
import org.apache.lucene.search.Scorer;
|
import org.apache.lucene.search.Scorer;
|
||||||
import org.apache.lucene.search.Weight;
|
import org.apache.lucene.search.Weight;
|
||||||
import org.elasticsearch.ElasticsearchGenerationException;
|
import org.elasticsearch.ElasticsearchGenerationException;
|
||||||
import org.elasticsearch.common.bytes.BytesReference;
|
|
||||||
import org.elasticsearch.common.compress.CompressedXContent;
|
import org.elasticsearch.common.compress.CompressedXContent;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.text.Text;
|
import org.elasticsearch.common.text.Text;
|
||||||
import org.elasticsearch.common.xcontent.ToXContent;
|
import org.elasticsearch.common.xcontent.ToXContent;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
|
||||||
import org.elasticsearch.common.xcontent.XContentType;
|
import org.elasticsearch.common.xcontent.XContentType;
|
||||||
import org.elasticsearch.index.IndexSettings;
|
import org.elasticsearch.index.IndexSettings;
|
||||||
import org.elasticsearch.index.analysis.IndexAnalyzers;
|
import org.elasticsearch.index.analysis.IndexAnalyzers;
|
||||||
import org.elasticsearch.index.mapper.MetadataFieldMapper.TypeParser;
|
import org.elasticsearch.index.mapper.MetadataFieldMapper.TypeParser;
|
||||||
|
import org.elasticsearch.index.query.QueryShardContext;
|
||||||
import org.elasticsearch.search.internal.SearchContext;
|
import org.elasticsearch.search.internal.SearchContext;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
@ -241,8 +240,8 @@ public class DocumentMapper implements ToXContent {
|
|||||||
return metadataMapper(IndexFieldMapper.class);
|
return metadataMapper(IndexFieldMapper.class);
|
||||||
}
|
}
|
||||||
|
|
||||||
public Query typeFilter() {
|
public Query typeFilter(QueryShardContext context) {
|
||||||
return typeMapper().fieldType().termQuery(type, null);
|
return typeMapper().fieldType().termQuery(type, context);
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean hasNestedObjects() {
|
public boolean hasNestedObjects() {
|
||||||
|
@ -30,7 +30,7 @@ import org.elasticsearch.common.lucene.search.Queries;
|
|||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||||
import org.elasticsearch.index.fielddata.IndexFieldData;
|
import org.elasticsearch.index.fielddata.IndexFieldData;
|
||||||
import org.elasticsearch.index.fielddata.plain.IndexIndexFieldData;
|
import org.elasticsearch.index.fielddata.plain.ConstantIndexFieldData;
|
||||||
import org.elasticsearch.index.query.QueryShardContext;
|
import org.elasticsearch.index.query.QueryShardContext;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
@ -157,7 +157,7 @@ public class IndexFieldMapper extends MetadataFieldMapper {
|
|||||||
|
|
||||||
@Override
|
@Override
|
||||||
public IndexFieldData.Builder fielddataBuilder() {
|
public IndexFieldData.Builder fielddataBuilder() {
|
||||||
return new IndexIndexFieldData.Builder();
|
return new ConstantIndexFieldData.Builder(mapperService -> mapperService.index().getName());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -99,12 +99,10 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||||||
public static final Setting<Boolean> INDEX_MAPPING_SINGLE_TYPE_SETTING;
|
public static final Setting<Boolean> INDEX_MAPPING_SINGLE_TYPE_SETTING;
|
||||||
static {
|
static {
|
||||||
Function<Settings, String> defValue = settings -> {
|
Function<Settings, String> defValue = settings -> {
|
||||||
// TODO: uncomment this
|
boolean singleType = true;
|
||||||
/*boolean singleType = true;
|
|
||||||
if (settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) != null) {
|
if (settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) != null) {
|
||||||
singleType = Version.indexCreated(settings).onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED);
|
singleType = Version.indexCreated(settings).onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED);
|
||||||
}*/
|
}
|
||||||
boolean singleType = false;
|
|
||||||
return Boolean.valueOf(singleType).toString();
|
return Boolean.valueOf(singleType).toString();
|
||||||
};
|
};
|
||||||
INDEX_MAPPING_SINGLE_TYPE_SETTING = Setting.boolSetting("index.mapping.single_type", defValue, Property.IndexScope, Property.Final);
|
INDEX_MAPPING_SINGLE_TYPE_SETTING = Setting.boolSetting("index.mapping.single_type", defValue, Property.IndexScope, Property.Final);
|
||||||
|
@ -22,6 +22,8 @@ import org.apache.lucene.document.Field;
|
|||||||
import org.apache.lucene.document.DoubleRange;
|
import org.apache.lucene.document.DoubleRange;
|
||||||
import org.apache.lucene.document.FloatRange;
|
import org.apache.lucene.document.FloatRange;
|
||||||
import org.apache.lucene.document.IntRange;
|
import org.apache.lucene.document.IntRange;
|
||||||
|
import org.apache.lucene.document.InetAddressPoint;
|
||||||
|
import org.apache.lucene.document.InetAddressRange;
|
||||||
import org.apache.lucene.document.LongRange;
|
import org.apache.lucene.document.LongRange;
|
||||||
import org.apache.lucene.document.StoredField;
|
import org.apache.lucene.document.StoredField;
|
||||||
import org.apache.lucene.index.IndexOptions;
|
import org.apache.lucene.index.IndexOptions;
|
||||||
@ -29,12 +31,12 @@ import org.apache.lucene.index.IndexableField;
|
|||||||
import org.apache.lucene.search.BoostQuery;
|
import org.apache.lucene.search.BoostQuery;
|
||||||
import org.apache.lucene.search.Query;
|
import org.apache.lucene.search.Query;
|
||||||
import org.apache.lucene.util.BytesRef;
|
import org.apache.lucene.util.BytesRef;
|
||||||
import org.apache.lucene.util.NumericUtils;
|
|
||||||
import org.elasticsearch.common.Explicit;
|
import org.elasticsearch.common.Explicit;
|
||||||
import org.elasticsearch.common.Nullable;
|
import org.elasticsearch.common.Nullable;
|
||||||
import org.elasticsearch.common.geo.ShapeRelation;
|
import org.elasticsearch.common.geo.ShapeRelation;
|
||||||
import org.elasticsearch.common.joda.DateMathParser;
|
import org.elasticsearch.common.joda.DateMathParser;
|
||||||
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
|
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
|
||||||
|
import org.elasticsearch.common.network.InetAddresses;
|
||||||
import org.elasticsearch.common.settings.Setting;
|
import org.elasticsearch.common.settings.Setting;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.util.LocaleUtils;
|
import org.elasticsearch.common.util.LocaleUtils;
|
||||||
@ -45,6 +47,7 @@ import org.elasticsearch.index.query.QueryShardContext;
|
|||||||
import org.joda.time.DateTimeZone;
|
import org.joda.time.DateTimeZone;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
import java.net.InetAddress;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.Iterator;
|
import java.util.Iterator;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
@ -341,8 +344,8 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
RangeFieldType fieldType = fieldType();
|
RangeFieldType fieldType = fieldType();
|
||||||
RangeType rangeType = fieldType.rangeType;
|
RangeType rangeType = fieldType.rangeType;
|
||||||
String fieldName = null;
|
String fieldName = null;
|
||||||
Number from = rangeType.minValue();
|
Object from = rangeType.minValue();
|
||||||
Number to = rangeType.maxValue();
|
Object to = rangeType.maxValue();
|
||||||
boolean includeFrom = DEFAULT_INCLUDE_LOWER;
|
boolean includeFrom = DEFAULT_INCLUDE_LOWER;
|
||||||
boolean includeTo = DEFAULT_INCLUDE_UPPER;
|
boolean includeTo = DEFAULT_INCLUDE_UPPER;
|
||||||
XContentParser.Token token;
|
XContentParser.Token token;
|
||||||
@ -427,10 +430,72 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
|
|
||||||
/** Enum defining the type of range */
|
/** Enum defining the type of range */
|
||||||
public enum RangeType {
|
public enum RangeType {
|
||||||
|
IP("ip_range") {
|
||||||
|
@Override
|
||||||
|
public Field getRangeField(String name, Range r) {
|
||||||
|
return new InetAddressRange(name, (InetAddress)r.from, (InetAddress)r.to);
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public InetAddress parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included)
|
||||||
|
throws IOException {
|
||||||
|
InetAddress address = InetAddresses.forString(parser.text());
|
||||||
|
return included ? address : nextUp(address);
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public InetAddress parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included)
|
||||||
|
throws IOException {
|
||||||
|
InetAddress address = InetAddresses.forString(parser.text());
|
||||||
|
return included ? address : nextDown(address);
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public InetAddress parse(Object value, boolean coerce) {
|
||||||
|
return value instanceof InetAddress ? (InetAddress) value : InetAddresses.forString((String) value);
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public InetAddress minValue() {
|
||||||
|
return InetAddressPoint.MIN_VALUE;
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public InetAddress maxValue() {
|
||||||
|
return InetAddressPoint.MAX_VALUE;
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public InetAddress nextUp(Object value) {
|
||||||
|
return InetAddressPoint.nextUp((InetAddress)value);
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public InetAddress nextDown(Object value) {
|
||||||
|
return InetAddressPoint.nextDown((InetAddress)value);
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public Query withinQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||||
|
InetAddress lower = (InetAddress)from;
|
||||||
|
InetAddress upper = (InetAddress)to;
|
||||||
|
return InetAddressRange.newWithinQuery(field,
|
||||||
|
includeLower ? lower : nextUp(lower), includeUpper ? upper : nextDown(upper));
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public Query containsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||||
|
InetAddress lower = (InetAddress)from;
|
||||||
|
InetAddress upper = (InetAddress)to;
|
||||||
|
return InetAddressRange.newContainsQuery(field,
|
||||||
|
includeLower ? lower : nextUp(lower), includeUpper ? upper : nextDown(upper));
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public Query intersectsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||||
|
InetAddress lower = (InetAddress)from;
|
||||||
|
InetAddress upper = (InetAddress)to;
|
||||||
|
return InetAddressRange.newIntersectsQuery(field,
|
||||||
|
includeLower ? lower : nextUp(lower), includeUpper ? upper : nextDown(upper));
|
||||||
|
}
|
||||||
|
public String toString(InetAddress address) {
|
||||||
|
return InetAddresses.toAddrString(address);
|
||||||
|
}
|
||||||
|
},
|
||||||
DATE("date_range", NumberType.LONG) {
|
DATE("date_range", NumberType.LONG) {
|
||||||
@Override
|
@Override
|
||||||
public Field getRangeField(String name, Range r) {
|
public Field getRangeField(String name, Range r) {
|
||||||
return new LongRange(name, new long[] {r.from.longValue()}, new long[] {r.to.longValue()});
|
return new LongRange(name, new long[] {((Number)r.from).longValue()}, new long[] {((Number)r.to).longValue()});
|
||||||
}
|
}
|
||||||
private Number parse(DateMathParser dateMathParser, String dateStr) {
|
private Number parse(DateMathParser dateMathParser, String dateStr) {
|
||||||
return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");});
|
return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");});
|
||||||
@ -456,16 +521,12 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
return Long.MAX_VALUE;
|
return Long.MAX_VALUE;
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Number nextUp(Number value) {
|
public Long nextUp(Object value) {
|
||||||
return LONG.nextUp(value);
|
return (long) LONG.nextUp(value);
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Number nextDown(Number value) {
|
public Long nextDown(Object value) {
|
||||||
return LONG.nextDown(value);
|
return (long) LONG.nextDown(value);
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public byte[] getBytes(Range r) {
|
|
||||||
return LONG.getBytes(r);
|
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper,
|
public Query rangeQuery(String field, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper,
|
||||||
@ -484,15 +545,15 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
return super.rangeQuery(field, low, high, includeLower, includeUpper, relation, zone, dateMathParser, context);
|
return super.rangeQuery(field, low, high, includeLower, includeUpper, relation, zone, dateMathParser, context);
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query withinQuery(String field, Number from, Number to, boolean includeLower, boolean includeUpper) {
|
public Query withinQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||||
return LONG.withinQuery(field, from, to, includeLower, includeUpper);
|
return LONG.withinQuery(field, from, to, includeLower, includeUpper);
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query containsQuery(String field, Number from, Number to, boolean includeLower, boolean includeUpper) {
|
public Query containsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||||
return LONG.containsQuery(field, from, to, includeLower, includeUpper);
|
return LONG.containsQuery(field, from, to, includeLower, includeUpper);
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query intersectsQuery(String field, Number from, Number to, boolean includeLower, boolean includeUpper) {
|
public Query intersectsQuery(String field, Object from, Object to, boolean includeLower, boolean includeUpper) {
|
||||||
return LONG.intersectsQuery(field, from, to, includeLower, includeUpper);
|
return LONG.intersectsQuery(field, from, to, includeLower, includeUpper);
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -507,38 +568,31 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
return Float.POSITIVE_INFINITY;
|
return Float.POSITIVE_INFINITY;
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Float nextUp(Number value) {
|
public Float nextUp(Object value) {
|
||||||
return Math.nextUp(value.floatValue());
|
return Math.nextUp(((Number)value).floatValue());
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Float nextDown(Number value) {
|
public Float nextDown(Object value) {
|
||||||
return Math.nextDown(value.floatValue());
|
return Math.nextDown(((Number)value).floatValue());
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Field getRangeField(String name, Range r) {
|
public Field getRangeField(String name, Range r) {
|
||||||
return new FloatRange(name, new float[] {r.from.floatValue()}, new float[] {r.to.floatValue()});
|
return new FloatRange(name, new float[] {((Number)r.from).floatValue()}, new float[] {((Number)r.to).floatValue()});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public byte[] getBytes(Range r) {
|
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
byte[] b = new byte[Float.BYTES*2];
|
|
||||||
NumericUtils.intToSortableBytes(NumericUtils.floatToSortableInt(r.from.floatValue()), b, 0);
|
|
||||||
NumericUtils.intToSortableBytes(NumericUtils.floatToSortableInt(r.to.floatValue()), b, Float.BYTES);
|
|
||||||
return b;
|
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
|
||||||
return FloatRange.newWithinQuery(field,
|
return FloatRange.newWithinQuery(field,
|
||||||
new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)},
|
new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)},
|
||||||
new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)});
|
new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
return FloatRange.newContainsQuery(field,
|
return FloatRange.newContainsQuery(field,
|
||||||
new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)},
|
new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)},
|
||||||
new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)});
|
new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
return FloatRange.newIntersectsQuery(field,
|
return FloatRange.newIntersectsQuery(field,
|
||||||
new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)},
|
new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)},
|
||||||
new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)});
|
new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)});
|
||||||
@ -554,38 +608,31 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
return Double.POSITIVE_INFINITY;
|
return Double.POSITIVE_INFINITY;
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Double nextUp(Number value) {
|
public Double nextUp(Object value) {
|
||||||
return Math.nextUp(value.doubleValue());
|
return Math.nextUp(((Number)value).doubleValue());
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Double nextDown(Number value) {
|
public Double nextDown(Object value) {
|
||||||
return Math.nextDown(value.doubleValue());
|
return Math.nextDown(((Number)value).doubleValue());
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Field getRangeField(String name, Range r) {
|
public Field getRangeField(String name, Range r) {
|
||||||
return new DoubleRange(name, new double[] {r.from.doubleValue()}, new double[] {r.to.doubleValue()});
|
return new DoubleRange(name, new double[] {((Number)r.from).doubleValue()}, new double[] {((Number)r.to).doubleValue()});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public byte[] getBytes(Range r) {
|
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
byte[] b = new byte[Double.BYTES*2];
|
|
||||||
NumericUtils.longToSortableBytes(NumericUtils.doubleToSortableLong(r.from.doubleValue()), b, 0);
|
|
||||||
NumericUtils.longToSortableBytes(NumericUtils.doubleToSortableLong(r.to.doubleValue()), b, Double.BYTES);
|
|
||||||
return b;
|
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
|
||||||
return DoubleRange.newWithinQuery(field,
|
return DoubleRange.newWithinQuery(field,
|
||||||
new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)},
|
new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)},
|
||||||
new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)});
|
new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
return DoubleRange.newContainsQuery(field,
|
return DoubleRange.newContainsQuery(field,
|
||||||
new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)},
|
new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)},
|
||||||
new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)});
|
new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
return DoubleRange.newIntersectsQuery(field,
|
return DoubleRange.newIntersectsQuery(field,
|
||||||
new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)},
|
new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)},
|
||||||
new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)});
|
new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)});
|
||||||
@ -603,36 +650,29 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
return Integer.MAX_VALUE;
|
return Integer.MAX_VALUE;
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Integer nextUp(Number value) {
|
public Integer nextUp(Object value) {
|
||||||
return value.intValue() + 1;
|
return ((Number)value).intValue() + 1;
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Integer nextDown(Number value) {
|
public Integer nextDown(Object value) {
|
||||||
return value.intValue() - 1;
|
return ((Number)value).intValue() - 1;
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Field getRangeField(String name, Range r) {
|
public Field getRangeField(String name, Range r) {
|
||||||
return new IntRange(name, new int[] {r.from.intValue()}, new int[] {r.to.intValue()});
|
return new IntRange(name, new int[] {((Number)r.from).intValue()}, new int[] {((Number)r.to).intValue()});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public byte[] getBytes(Range r) {
|
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
byte[] b = new byte[Integer.BYTES*2];
|
|
||||||
NumericUtils.intToSortableBytes(r.from.intValue(), b, 0);
|
|
||||||
NumericUtils.intToSortableBytes(r.to.intValue(), b, Integer.BYTES);
|
|
||||||
return b;
|
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
|
||||||
return IntRange.newWithinQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)},
|
return IntRange.newWithinQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)},
|
||||||
new int[] {(Integer)to - (includeTo ? 0 : 1)});
|
new int[] {(Integer)to - (includeTo ? 0 : 1)});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
return IntRange.newContainsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)},
|
return IntRange.newContainsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)},
|
||||||
new int[] {(Integer)to - (includeTo ? 0 : 1)});
|
new int[] {(Integer)to - (includeTo ? 0 : 1)});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
return IntRange.newIntersectsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)},
|
return IntRange.newIntersectsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)},
|
||||||
new int[] {(Integer)to - (includeTo ? 0 : 1)});
|
new int[] {(Integer)to - (includeTo ? 0 : 1)});
|
||||||
}
|
}
|
||||||
@ -647,43 +687,40 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
return Long.MAX_VALUE;
|
return Long.MAX_VALUE;
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Long nextUp(Number value) {
|
public Long nextUp(Object value) {
|
||||||
return value.longValue() + 1;
|
return ((Number)value).longValue() + 1;
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Long nextDown(Number value) {
|
public Long nextDown(Object value) {
|
||||||
return value.longValue() - 1;
|
return ((Number)value).longValue() - 1;
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Field getRangeField(String name, Range r) {
|
public Field getRangeField(String name, Range r) {
|
||||||
return new LongRange(name, new long[] {r.from.longValue()}, new long[] {r.to.longValue()});
|
return new LongRange(name, new long[] {((Number)r.from).longValue()},
|
||||||
|
new long[] {((Number)r.to).longValue()});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public byte[] getBytes(Range r) {
|
public Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
byte[] b = new byte[Long.BYTES*2];
|
|
||||||
long from = r.from == null ? Long.MIN_VALUE : r.from.longValue();
|
|
||||||
long to = r.to == null ? Long.MAX_VALUE : r.to.longValue();
|
|
||||||
NumericUtils.longToSortableBytes(from, b, 0);
|
|
||||||
NumericUtils.longToSortableBytes(to, b, Long.BYTES);
|
|
||||||
return b;
|
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
|
||||||
return LongRange.newWithinQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)},
|
return LongRange.newWithinQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)},
|
||||||
new long[] {(Long)to - (includeTo ? 0 : 1)});
|
new long[] {(Long)to - (includeTo ? 0 : 1)});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
public Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
return LongRange.newContainsQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)},
|
return LongRange.newContainsQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)},
|
||||||
new long[] {(Long)to - (includeTo ? 0 : 1)});
|
new long[] {(Long)to - (includeTo ? 0 : 1)});
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
public Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
return LongRange.newIntersectsQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)},
|
return LongRange.newIntersectsQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)},
|
||||||
new long[] {(Long)to - (includeTo ? 0 : 1)});
|
new long[] {(Long)to - (includeTo ? 0 : 1)});
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
RangeType(String name) {
|
||||||
|
this.name = name;
|
||||||
|
this.numberType = null;
|
||||||
|
}
|
||||||
|
|
||||||
RangeType(String name, NumberType type) {
|
RangeType(String name, NumberType type) {
|
||||||
this.name = name;
|
this.name = name;
|
||||||
this.numberType = type;
|
this.numberType = type;
|
||||||
@ -694,7 +731,6 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
return name;
|
return name;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected abstract byte[] getBytes(Range range);
|
|
||||||
public abstract Field getRangeField(String name, Range range);
|
public abstract Field getRangeField(String name, Range range);
|
||||||
public List<IndexableField> createFields(String name, Range range, boolean indexed, boolean docValued, boolean stored) {
|
public List<IndexableField> createFields(String name, Range range, boolean indexed, boolean docValued, boolean stored) {
|
||||||
assert range != null : "range cannot be null when creating fields";
|
assert range != null : "range cannot be null when creating fields";
|
||||||
@ -709,29 +745,31 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
return fields;
|
return fields;
|
||||||
}
|
}
|
||||||
/** parses from value. rounds according to included flag */
|
/** parses from value. rounds according to included flag */
|
||||||
public Number parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException {
|
public Object parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException {
|
||||||
Number value = numberType.parse(parser, coerce);
|
Number value = numberType.parse(parser, coerce);
|
||||||
return included ? value : nextUp(value);
|
return included ? value : (Number)nextUp(value);
|
||||||
}
|
}
|
||||||
/** parses to value. rounds according to included flag */
|
/** parses to value. rounds according to included flag */
|
||||||
public Number parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException {
|
public Object parseTo(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) throws IOException {
|
||||||
Number value = numberType.parse(parser, coerce);
|
Number value = numberType.parse(parser, coerce);
|
||||||
return included ? value : nextDown(value);
|
return included ? value : (Number)nextDown(value);
|
||||||
}
|
}
|
||||||
|
|
||||||
public abstract Number minValue();
|
public abstract Object minValue();
|
||||||
public abstract Number maxValue();
|
public abstract Object maxValue();
|
||||||
public abstract Number nextUp(Number value);
|
public abstract Object nextUp(Object value);
|
||||||
public abstract Number nextDown(Number value);
|
public abstract Object nextDown(Object value);
|
||||||
public abstract Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo);
|
public abstract Query withinQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo);
|
||||||
public abstract Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo);
|
public abstract Query containsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo);
|
||||||
public abstract Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo);
|
public abstract Query intersectsQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo);
|
||||||
|
public Object parse(Object value, boolean coerce) {
|
||||||
|
return numberType.parse(value, coerce);
|
||||||
|
}
|
||||||
public Query rangeQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo,
|
public Query rangeQuery(String field, Object from, Object to, boolean includeFrom, boolean includeTo,
|
||||||
ShapeRelation relation, @Nullable DateTimeZone timeZone, @Nullable DateMathParser dateMathParser,
|
ShapeRelation relation, @Nullable DateTimeZone timeZone, @Nullable DateMathParser dateMathParser,
|
||||||
QueryShardContext context) {
|
QueryShardContext context) {
|
||||||
Number lower = from == null ? minValue() : numberType.parse(from, false);
|
Object lower = from == null ? minValue() : parse(from, false);
|
||||||
Number upper = to == null ? maxValue() : numberType.parse(to, false);
|
Object upper = to == null ? maxValue() : parse(to, false);
|
||||||
if (relation == ShapeRelation.WITHIN) {
|
if (relation == ShapeRelation.WITHIN) {
|
||||||
return withinQuery(field, lower, upper, includeFrom, includeTo);
|
return withinQuery(field, lower, upper, includeFrom, includeTo);
|
||||||
} else if (relation == ShapeRelation.CONTAINS) {
|
} else if (relation == ShapeRelation.CONTAINS) {
|
||||||
@ -747,12 +785,12 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
/** Class defining a range */
|
/** Class defining a range */
|
||||||
public static class Range {
|
public static class Range {
|
||||||
RangeType type;
|
RangeType type;
|
||||||
private Number from;
|
private Object from;
|
||||||
private Number to;
|
private Object to;
|
||||||
private boolean includeFrom;
|
private boolean includeFrom;
|
||||||
private boolean includeTo;
|
private boolean includeTo;
|
||||||
|
|
||||||
public Range(RangeType type, Number from, Number to, boolean includeFrom, boolean includeTo) {
|
public Range(RangeType type, Object from, Object to, boolean includeFrom, boolean includeTo) {
|
||||||
this.type = type;
|
this.type = type;
|
||||||
this.from = from;
|
this.from = from;
|
||||||
this.to = to;
|
this.to = to;
|
||||||
@ -764,9 +802,11 @@ public class RangeFieldMapper extends FieldMapper {
|
|||||||
public String toString() {
|
public String toString() {
|
||||||
StringBuilder sb = new StringBuilder();
|
StringBuilder sb = new StringBuilder();
|
||||||
sb.append(includeFrom ? '[' : '(');
|
sb.append(includeFrom ? '[' : '(');
|
||||||
sb.append(includeFrom || from.equals(type.minValue()) ? from : type.nextDown(from));
|
Object f = includeFrom || from.equals(type.minValue()) ? from : type.nextDown(from);
|
||||||
sb.append(':');
|
Object t = includeTo || to.equals(type.maxValue()) ? to : type.nextUp(to);
|
||||||
sb.append(includeTo || to.equals(type.maxValue()) ? to : type.nextUp(to));
|
sb.append(type == RangeType.IP ? InetAddresses.toAddrString((InetAddress)f) : f.toString());
|
||||||
|
sb.append(" : ");
|
||||||
|
sb.append(type == RangeType.IP ? InetAddresses.toAddrString((InetAddress)t) : t.toString());
|
||||||
sb.append(includeTo ? ']' : ')');
|
sb.append(includeTo ? ']' : ')');
|
||||||
return sb.toString();
|
return sb.toString();
|
||||||
}
|
}
|
||||||
|
@ -30,26 +30,29 @@ import org.apache.lucene.search.BooleanClause;
|
|||||||
import org.apache.lucene.search.BooleanQuery;
|
import org.apache.lucene.search.BooleanQuery;
|
||||||
import org.apache.lucene.search.ConstantScoreQuery;
|
import org.apache.lucene.search.ConstantScoreQuery;
|
||||||
import org.apache.lucene.search.MatchAllDocsQuery;
|
import org.apache.lucene.search.MatchAllDocsQuery;
|
||||||
|
import org.apache.lucene.search.MatchNoDocsQuery;
|
||||||
import org.apache.lucene.search.Query;
|
import org.apache.lucene.search.Query;
|
||||||
import org.apache.lucene.search.TermQuery;
|
import org.apache.lucene.search.TermQuery;
|
||||||
import org.apache.lucene.search.TermInSetQuery;
|
import org.apache.lucene.search.TermInSetQuery;
|
||||||
import org.apache.lucene.util.BytesRef;
|
import org.apache.lucene.util.BytesRef;
|
||||||
import org.elasticsearch.common.Nullable;
|
import org.elasticsearch.action.fieldstats.FieldStats;
|
||||||
import org.elasticsearch.common.lucene.Lucene;
|
import org.elasticsearch.common.lucene.Lucene;
|
||||||
|
import org.elasticsearch.common.lucene.search.Queries;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||||
import org.elasticsearch.index.fielddata.IndexFieldData;
|
import org.elasticsearch.index.fielddata.IndexFieldData;
|
||||||
import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
|
import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
|
||||||
import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;
|
import org.elasticsearch.index.fielddata.plain.ConstantIndexFieldData;
|
||||||
import org.elasticsearch.index.query.QueryShardContext;
|
import org.elasticsearch.index.query.QueryShardContext;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
|
import java.util.Collection;
|
||||||
import java.util.HashSet;
|
import java.util.HashSet;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.Objects;
|
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
import java.util.function.Function;
|
||||||
|
|
||||||
public class TypeFieldMapper extends MetadataFieldMapper {
|
public class TypeFieldMapper extends MetadataFieldMapper {
|
||||||
|
|
||||||
@ -88,29 +91,12 @@ public class TypeFieldMapper extends MetadataFieldMapper {
|
|||||||
}
|
}
|
||||||
|
|
||||||
static final class TypeFieldType extends StringFieldType {
|
static final class TypeFieldType extends StringFieldType {
|
||||||
private boolean fielddata;
|
|
||||||
|
|
||||||
TypeFieldType() {
|
TypeFieldType() {
|
||||||
this.fielddata = false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
protected TypeFieldType(TypeFieldType ref) {
|
protected TypeFieldType(TypeFieldType ref) {
|
||||||
super(ref);
|
super(ref);
|
||||||
this.fielddata = ref.fielddata;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean equals(Object o) {
|
|
||||||
if (super.equals(o) == false) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
TypeFieldType that = (TypeFieldType) o;
|
|
||||||
return fielddata == that.fielddata;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public int hashCode() {
|
|
||||||
return Objects.hash(super.hashCode(), fielddata);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -123,49 +109,76 @@ public class TypeFieldMapper extends MetadataFieldMapper {
|
|||||||
return CONTENT_TYPE;
|
return CONTENT_TYPE;
|
||||||
}
|
}
|
||||||
|
|
||||||
public boolean fielddata() {
|
|
||||||
return fielddata;
|
|
||||||
}
|
|
||||||
|
|
||||||
public void setFielddata(boolean fielddata) {
|
|
||||||
checkIfFrozen();
|
|
||||||
this.fielddata = fielddata;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public IndexFieldData.Builder fielddataBuilder() {
|
public IndexFieldData.Builder fielddataBuilder() {
|
||||||
if (hasDocValues()) {
|
if (hasDocValues()) {
|
||||||
return new DocValuesIndexFieldData.Builder();
|
return new DocValuesIndexFieldData.Builder();
|
||||||
|
} else {
|
||||||
|
// means the index has a single type and the type field is implicit
|
||||||
|
Function<MapperService, String> typeFunction = mapperService -> {
|
||||||
|
Collection<String> types = mapperService.types();
|
||||||
|
if (types.size() > 1) {
|
||||||
|
throw new AssertionError();
|
||||||
|
}
|
||||||
|
// If we reach here, there is necessarily one type since we were able to find a `_type` field
|
||||||
|
String type = types.iterator().next();
|
||||||
|
return type;
|
||||||
|
};
|
||||||
|
return new ConstantIndexFieldData.Builder(typeFunction);
|
||||||
}
|
}
|
||||||
assert indexOptions() != IndexOptions.NONE;
|
|
||||||
if (fielddata) {
|
|
||||||
return new PagedBytesIndexFieldData.Builder(TextFieldMapper.Defaults.FIELDDATA_MIN_FREQUENCY,
|
|
||||||
TextFieldMapper.Defaults.FIELDDATA_MAX_FREQUENCY,
|
|
||||||
TextFieldMapper.Defaults.FIELDDATA_MIN_SEGMENT_SIZE);
|
|
||||||
}
|
|
||||||
return super.fielddataBuilder();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Query termQuery(Object value, @Nullable QueryShardContext context) {
|
public FieldStats<?> stats(IndexReader reader) throws IOException {
|
||||||
if (indexOptions() == IndexOptions.NONE) {
|
if (reader.maxDoc() == 0) {
|
||||||
throw new AssertionError();
|
return null;
|
||||||
}
|
}
|
||||||
return new TypesQuery(indexedValueForSearch(value));
|
return new FieldStats.Text(reader.maxDoc(), reader.numDocs(), reader.maxDoc(), reader.maxDoc(),
|
||||||
|
isSearchable(), isAggregatable());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void checkCompatibility(MappedFieldType other,
|
public boolean isSearchable() {
|
||||||
List<String> conflicts, boolean strict) {
|
return true;
|
||||||
super.checkCompatibility(other, conflicts, strict);
|
}
|
||||||
TypeFieldType otherType = (TypeFieldType) other;
|
|
||||||
if (strict) {
|
@Override
|
||||||
if (fielddata() != otherType.fielddata()) {
|
public Query termQuery(Object value, QueryShardContext context) {
|
||||||
conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] "
|
return termsQuery(Arrays.asList(value), context);
|
||||||
+ "across all types.");
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Query termsQuery(List<?> values, QueryShardContext context) {
|
||||||
|
if (context.getIndexSettings().getValue(MapperService.INDEX_MAPPING_SINGLE_TYPE_SETTING)) {
|
||||||
|
Collection<String> indexTypes = context.getMapperService().types();
|
||||||
|
if (indexTypes.isEmpty()) {
|
||||||
|
return new MatchNoDocsQuery("No types");
|
||||||
}
|
}
|
||||||
|
assert indexTypes.size() == 1;
|
||||||
|
BytesRef indexType = indexedValueForSearch(indexTypes.iterator().next());
|
||||||
|
if (values.stream()
|
||||||
|
.map(this::indexedValueForSearch)
|
||||||
|
.anyMatch(indexType::equals)) {
|
||||||
|
if (context.getMapperService().hasNested()) {
|
||||||
|
// type filters are expected not to match nested docs
|
||||||
|
return Queries.newNonNestedFilter();
|
||||||
|
} else {
|
||||||
|
return new MatchAllDocsQuery();
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return new MatchNoDocsQuery("Type list does not contain the index type");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (indexOptions() == IndexOptions.NONE) {
|
||||||
|
throw new AssertionError();
|
||||||
|
}
|
||||||
|
final BytesRef[] types = values.stream()
|
||||||
|
.map(this::indexedValueForSearch)
|
||||||
|
.toArray(size -> new BytesRef[size]);
|
||||||
|
return new TypesQuery(types);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -261,7 +274,13 @@ public class TypeFieldMapper extends MetadataFieldMapper {
|
|||||||
|
|
||||||
private static MappedFieldType defaultFieldType(Settings indexSettings) {
|
private static MappedFieldType defaultFieldType(Settings indexSettings) {
|
||||||
MappedFieldType defaultFieldType = Defaults.FIELD_TYPE.clone();
|
MappedFieldType defaultFieldType = Defaults.FIELD_TYPE.clone();
|
||||||
defaultFieldType.setHasDocValues(true);
|
if (MapperService.INDEX_MAPPING_SINGLE_TYPE_SETTING.get(indexSettings)) {
|
||||||
|
defaultFieldType.setIndexOptions(IndexOptions.NONE);
|
||||||
|
defaultFieldType.setHasDocValues(false);
|
||||||
|
} else {
|
||||||
|
defaultFieldType.setIndexOptions(IndexOptions.DOCS);
|
||||||
|
defaultFieldType.setHasDocValues(true);
|
||||||
|
}
|
||||||
return defaultFieldType;
|
return defaultFieldType;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -338,10 +338,10 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
         }
 
         // wrap the query with type query
-        innerQuery = Queries.filtered(innerQuery, childDocMapper.typeFilter());
+        innerQuery = Queries.filtered(innerQuery, childDocMapper.typeFilter(context));
 
         final ParentChildIndexFieldData parentChildIndexFieldData = context.getForField(parentFieldMapper.fieldType());
-        return new LateParsingQuery(parentDocMapper.typeFilter(), innerQuery, minChildren(), maxChildren(),
+        return new LateParsingQuery(parentDocMapper.typeFilter(context), innerQuery, minChildren(), maxChildren(),
                 parentType, scoreMode, parentChildIndexFieldData, context.getSearchSimilarity());
     }
 
@@ -185,18 +185,18 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
         Query childrenQuery;
         if (childTypes.size() == 1) {
             DocumentMapper documentMapper = context.getMapperService().documentMapper(childTypes.iterator().next());
-            childrenQuery = documentMapper.typeFilter();
+            childrenQuery = documentMapper.typeFilter(context);
         } else {
             BooleanQuery.Builder childrenFilter = new BooleanQuery.Builder();
             for (String childrenTypeStr : childTypes) {
                 DocumentMapper documentMapper = context.getMapperService().documentMapper(childrenTypeStr);
-                childrenFilter.add(documentMapper.typeFilter(), BooleanClause.Occur.SHOULD);
+                childrenFilter.add(documentMapper.typeFilter(context), BooleanClause.Occur.SHOULD);
             }
             childrenQuery = childrenFilter.build();
         }
 
         // wrap the query with type query
-        innerQuery = Queries.filtered(innerQuery, parentDocMapper.typeFilter());
+        innerQuery = Queries.filtered(innerQuery, parentDocMapper.typeFilter(context));
         return new HasChildQueryBuilder.LateParsingQuery(childrenQuery,
                 innerQuery,
                 HasChildQueryBuilder.DEFAULT_MIN_CHILDREN,
@@ -54,16 +54,12 @@ public enum Operator implements Writeable {
     }
 
     public static Operator readFromStream(StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown Operator ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(Operator.class);
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(this.ordinal());
+        out.writeEnum(this);
     }
 
     public static Operator fromString(String op) {
@@ -69,14 +69,14 @@ public class QueryRewriteContext {
      * Returns the index settings for this context. This might return null if the
      * context has not index scope.
      */
-    public final IndexSettings getIndexSettings() {
+    public IndexSettings getIndexSettings() {
         return indexSettings;
     }
 
     /**
      * Return the MapperService.
      */
-    public final MapperService getMapperService() {
+    public MapperService getMapperService() {
         return mapperService;
     }
 
@@ -132,7 +132,7 @@ public class TypeQueryBuilder extends AbstractQueryBuilder<TypeQueryBuilder> {
             // no type means no documents
             return new MatchNoDocsQuery();
         } else {
-            return documentMapper.typeFilter();
+            return documentMapper.typeFilter(context);
         }
     }
 
@@ -1523,20 +1523,20 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         verifyReplicationTarget();
         final SequenceNumbersService seqNoService = getEngine().seqNoService();
         final long localCheckpoint = seqNoService.getLocalCheckpoint();
-        if (globalCheckpoint <= localCheckpoint) {
-            seqNoService.updateGlobalCheckpointOnReplica(globalCheckpoint);
-        } else {
+        if (globalCheckpoint > localCheckpoint) {
             /*
              * This can happen during recovery when the shard has started its engine but recovery is not finalized and is receiving global
-             * checkpoint updates from in-flight operations. However, since this shard is not yet contributing to calculating the global
-             * checkpoint, it can be the case that the global checkpoint update from the primary is ahead of the local checkpoint on this
-             * shard. In this case, we ignore the global checkpoint update. This should only happen if we are in the translog stage of
-             * recovery. Prior to this, the engine is not opened and this shard will not receive global checkpoint updates, and after this
-             * the shard will be contributing to calculations of the the global checkpoint.
+             * checkpoint updates. However, since this shard is not yet contributing to calculating the global checkpoint, it can be the
+             * case that the global checkpoint update from the primary is ahead of the local checkpoint on this shard. In this case, we
+             * ignore the global checkpoint update. This can happen if we are in the translog stage of recovery. Prior to this, the engine
+             * is not opened and this shard will not receive global checkpoint updates, and after this the shard will be contributing to
+             * calculations of the the global checkpoint. However, we can not assert that we are in the translog stage of recovery here as
+             * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move
+             * to recovery finalization, or even finished recovery before the update arrives here.
              */
-            assert recoveryState().getStage() == RecoveryState.Stage.TRANSLOG
-                : "expected recovery stage [" + RecoveryState.Stage.TRANSLOG + "] but was [" + recoveryState().getStage() + "]";
+            return;
         }
+        seqNoService.updateGlobalCheckpointOnReplica(globalCheckpoint);
     }
 
     /**
@@ -49,10 +49,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
         long free = -1;
         long available = -1;
 
-        /** Uses Lucene's {@code IOUtils.spins} method to try to determine if the device backed by spinning media.
-         * This is null if we could not determine it, true if it possibly spins, else false. */
-        Boolean spins = null;
-
         public Path() {
         }
 
@@ -74,7 +70,9 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
             total = in.readLong();
             free = in.readLong();
             available = in.readLong();
-            spins = in.readOptionalBoolean();
+            if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+                in.readOptionalBoolean();
+            }
         }
 
         @Override
@@ -85,7 +83,9 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
             out.writeLong(total);
             out.writeLong(free);
             out.writeLong(available);
-            out.writeOptionalBoolean(spins);
+            if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+                out.writeOptionalBoolean(null);
+            }
         }
 
         public String getPath() {
@@ -112,10 +112,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
             return new ByteSizeValue(available);
         }
 
-        public Boolean getSpins() {
-            return spins;
-        }
-
         private long addLong(long current, long other) {
             if (other == -1) {
                 return current;
@@ -140,10 +136,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
            total = FsProbe.adjustForHugeFilesystems(addLong(total, path.total));
            free = FsProbe.adjustForHugeFilesystems(addLong(free, path.free));
            available = FsProbe.adjustForHugeFilesystems(addLong(available, path.available));
-           if (path.spins != null && path.spins.booleanValue()) {
-               // Spinning is contagious!
-               spins = Boolean.TRUE;
-           }
         }
 
         static final class Fields {
@@ -156,7 +148,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
            static final String FREE_IN_BYTES = "free_in_bytes";
            static final String AVAILABLE = "available";
            static final String AVAILABLE_IN_BYTES = "available_in_bytes";
-           static final String SPINS = "spins";
         }
 
         @Override
@@ -181,9 +172,6 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
             if (available != -1) {
                 builder.byteSizeField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, available);
             }
-            if (spins != null) {
-                builder.field(Fields.SPINS, spins.toString());
-            }
 
             builder.endObject();
             return builder;
@@ -159,7 +159,6 @@ public class FsProbe extends AbstractComponent {
         fsPath.available = nodePath.fileStore.getUsableSpace();
         fsPath.type = nodePath.fileStore.type();
         fsPath.mount = nodePath.fileStore.toString();
-        fsPath.spins = nodePath.spins;
         return fsPath;
     }
 
|
@ -49,7 +49,6 @@ import org.elasticsearch.cluster.metadata.MetaData;
|
|||||||
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
|
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
|
||||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||||
import org.elasticsearch.cluster.routing.RoutingService;
|
import org.elasticsearch.cluster.routing.RoutingService;
|
||||||
import org.elasticsearch.cluster.routing.allocation.AllocationService;
|
|
||||||
import org.elasticsearch.cluster.service.ClusterService;
|
import org.elasticsearch.cluster.service.ClusterService;
|
||||||
import org.elasticsearch.common.StopWatch;
|
import org.elasticsearch.common.StopWatch;
|
||||||
import org.elasticsearch.common.SuppressForbidden;
|
import org.elasticsearch.common.SuppressForbidden;
|
||||||
@ -352,7 +351,7 @@ public class Node implements Closeable {
|
|||||||
final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool, clusterInfoService);
|
final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool, clusterInfoService);
|
||||||
modules.add(new NodeModule(this, monitorService));
|
modules.add(new NodeModule(this, monitorService));
|
||||||
ClusterModule clusterModule = new ClusterModule(settings, clusterService,
|
ClusterModule clusterModule = new ClusterModule(settings, clusterService,
|
||||||
pluginsService.filterPlugins(ClusterPlugin.class));
|
pluginsService.filterPlugins(ClusterPlugin.class), clusterInfoService);
|
||||||
modules.add(clusterModule);
|
modules.add(clusterModule);
|
||||||
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
|
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class));
|
||||||
modules.add(indicesModule);
|
modules.add(indicesModule);
|
||||||
@ -437,7 +436,8 @@ public class Node implements Closeable {
|
|||||||
|
|
||||||
final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry,
|
final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry,
|
||||||
networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(),
|
networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(),
|
||||||
clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class));
|
clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class),
|
||||||
|
clusterModule.getAllocationService());
|
||||||
NodeService nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(),
|
NodeService nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(),
|
||||||
transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(),
|
transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(),
|
||||||
httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter());
|
httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter());
|
||||||
@ -488,6 +488,9 @@ public class Node implements Closeable {
|
|||||||
);
|
);
|
||||||
injector = modules.createInjector();
|
injector = modules.createInjector();
|
||||||
|
|
||||||
|
// TODO hack around circular dependencies problems in AllocationService
|
||||||
|
clusterModule.getAllocationService().setGatewayAllocator(injector.getInstance(GatewayAllocator.class));
|
||||||
|
|
||||||
List<LifecycleComponent> pluginLifecycleComponents = pluginComponents.stream()
|
List<LifecycleComponent> pluginLifecycleComponents = pluginComponents.stream()
|
||||||
.filter(p -> p instanceof LifecycleComponent)
|
.filter(p -> p instanceof LifecycleComponent)
|
||||||
.map(p -> (LifecycleComponent) p).collect(Collectors.toList());
|
.map(p -> (LifecycleComponent) p).collect(Collectors.toList());
|
||||||
@ -644,8 +647,6 @@ public class Node implements Closeable {
|
|||||||
|
|
||||||
Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
|
Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
|
||||||
logger.info("starting ...");
|
logger.info("starting ...");
|
||||||
// hack around dependency injection problem (for now...)
|
|
||||||
injector.getInstance(Discovery.class).setAllocationService(injector.getInstance(AllocationService.class));
|
|
||||||
pluginLifecycleComponents.forEach(LifecycleComponent::start);
|
pluginLifecycleComponents.forEach(LifecycleComponent::start);
|
||||||
|
|
||||||
injector.getInstance(MappingUpdatedAction.class).setClient(client);
|
injector.getInstance(MappingUpdatedAction.class).setClient(client);
|
||||||
@ -663,9 +664,6 @@ public class Node implements Closeable {
|
|||||||
nodeConnectionsService.start();
|
nodeConnectionsService.start();
|
||||||
clusterService.setNodeConnectionsService(nodeConnectionsService);
|
clusterService.setNodeConnectionsService(nodeConnectionsService);
|
||||||
|
|
||||||
// TODO hack around circular dependencies problems
|
|
||||||
injector.getInstance(GatewayAllocator.class).setReallocation(clusterService, injector.getInstance(RoutingService.class));
|
|
||||||
|
|
||||||
injector.getInstance(ResourceWatcherService.class).start();
|
injector.getInstance(ResourceWatcherService.class).start();
|
||||||
injector.getInstance(GatewayService.class).start();
|
injector.getInstance(GatewayService.class).start();
|
||||||
Discovery discovery = injector.getInstance(Discovery.class);
|
Discovery discovery = injector.getInstance(Discovery.class);
|
||||||
|
@@ -23,6 +23,7 @@ import java.util.Collections;
 import java.util.Map;
 import java.util.function.Supplier;
 
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.service.ClusterApplier;
 import org.elasticsearch.cluster.service.MasterService;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -68,7 +69,8 @@ public interface DiscoveryPlugin {
             MasterService masterService,
             ClusterApplier clusterApplier,
             ClusterSettings clusterSettings,
-            UnicastHostsProvider hostsProvider) {
+            UnicastHostsProvider hostsProvider,
+            AllocationService allocationService) {
         return Collections.emptyMap();
     }
 
|
|||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.io.UncheckedIOException;
|
import java.io.UncheckedIOException;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
|
import java.util.Arrays;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
@ -290,13 +291,13 @@ final class DefaultSearchContext extends SearchContext {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private static Query createTypeFilter(String[] types) {
|
private Query createTypeFilter(String[] types) {
|
||||||
if (types != null && types.length >= 1) {
|
if (types != null && types.length >= 1) {
|
||||||
BytesRef[] typesBytes = new BytesRef[types.length];
|
MappedFieldType ft = mapperService().fullName(TypeFieldMapper.NAME);
|
||||||
for (int i = 0; i < typesBytes.length; i++) {
|
if (ft != null) {
|
||||||
typesBytes[i] = new BytesRef(types[i]);
|
// ft might be null if no documents have been indexed yet
|
||||||
|
return ft.termsQuery(Arrays.asList(types), queryShardContext);
|
||||||
}
|
}
|
||||||
return new TypeFieldMapper.TypesQuery(typesBytes);
|
|
||||||
}
|
}
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
@@ -948,14 +948,10 @@ public enum MultiValueMode implements Writeable {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(this.ordinal());
+        out.writeEnum(this);
     }
 
     public static MultiValueMode readMultiValueModeFrom(StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown MultiValueMode ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(MultiValueMode.class);
     }
 }
@@ -139,16 +139,12 @@ public abstract class Aggregator extends BucketCollector implements Releasable {
         }
 
         public static SubAggCollectionMode readFromStream(StreamInput in) throws IOException {
-            int ordinal = in.readVInt();
-            if (ordinal < 0 || ordinal >= values().length) {
-                throw new IOException("Unknown SubAggCollectionMode ordinal [" + ordinal + "]");
-            }
-            return values()[ordinal];
+            return in.readEnum(SubAggCollectionMode.class);
         }
 
         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            out.writeVInt(ordinal());
+            out.writeEnum(this);
         }
     }
 }
@@ -98,8 +98,8 @@ public class ChildrenAggregationBuilder extends ValuesSourceAggregationBuilder<P
             parentType = parentFieldMapper.type();
             DocumentMapper parentDocMapper = context.mapperService().documentMapper(parentType);
             if (parentDocMapper != null) {
-                parentFilter = parentDocMapper.typeFilter();
-                childFilter = childDocMapper.typeFilter();
+                parentFilter = parentDocMapper.typeFilter(context.getQueryShardContext());
+                childFilter = childDocMapper.typeFilter(context.getQueryShardContext());
                 ParentChildIndexFieldData parentChildIndexFieldData = context.fieldData()
                     .getForField(parentFieldMapper.fieldType());
                 config.fieldContext(new FieldContext(parentFieldMapper.fieldType().name(), parentChildIndexFieldData,
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
int compareTerm(Terms.Bucket other) {
|
public int compareTerm(Terms.Bucket other) {
|
||||||
return Double.compare(term, ((Number) other.getKey()).doubleValue());
|
return Double.compare(term, ((Number) other.getKey()).doubleValue());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -218,7 +218,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
         }
 
         @Override
-        int compareTerm(Terms.Bucket other) {
+        public int compareTerm(Terms.Bucket other) {
             return Long.compare(globalOrd, ((OrdBucket) other).globalOrd);
         }
 
@@ -26,6 +26,7 @@ import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 
 import java.io.IOException;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -101,7 +102,7 @@ public abstract class InternalMappedTerms<A extends InternalTerms<A, B>, B exten
     }
 
     @Override
-    public List<B> getBucketsInternal() {
+    public List<B> getBuckets() {
         return buckets;
     }
 
@ -40,15 +40,13 @@ import java.util.List;
|
|||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
|
|
||||||
import static java.util.Collections.unmodifiableList;
|
|
||||||
|
|
||||||
public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends InternalTerms.Bucket<B>>
|
public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends InternalTerms.Bucket<B>>
|
||||||
extends InternalMultiBucketAggregation<A, B> implements Terms, ToXContent {
|
extends InternalMultiBucketAggregation<A, B> implements Terms, ToXContent {
|
||||||
|
|
||||||
protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound");
|
protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound");
|
||||||
protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count");
|
protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count");
|
||||||
|
|
||||||
public abstract static class Bucket<B extends Bucket<B>> extends Terms.Bucket {
|
public abstract static class Bucket<B extends Bucket<B>> extends InternalMultiBucketAggregation.InternalBucket implements Terms.Bucket {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Reads a bucket. Should be a constructor reference.
|
* Reads a bucket. Should be a constructor reference.
|
||||||
@ -212,11 +210,7 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
|
|||||||
protected abstract void writeTermTypeInfoTo(StreamOutput out) throws IOException;
|
protected abstract void writeTermTypeInfoTo(StreamOutput out) throws IOException;
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public final List<Terms.Bucket> getBuckets() {
|
public abstract List<B> getBuckets();
|
||||||
return unmodifiableList(getBucketsInternal());
|
|
||||||
}
|
|
||||||
|
|
||||||
protected abstract List<B> getBucketsInternal();
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public abstract B getBucketByKey(String term);
|
public abstract B getBucketByKey(String term);
|
||||||
@ -244,7 +238,7 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
|
|||||||
}
|
}
|
||||||
otherDocCount += terms.getSumOfOtherDocCounts();
|
otherDocCount += terms.getSumOfOtherDocCounts();
|
||||||
final long thisAggDocCountError;
|
final long thisAggDocCountError;
|
||||||
if (terms.getBucketsInternal().size() < getShardSize() || InternalOrder.isTermOrder(order)) {
|
if (terms.getBuckets().size() < getShardSize() || InternalOrder.isTermOrder(order)) {
|
||||||
thisAggDocCountError = 0;
|
thisAggDocCountError = 0;
|
||||||
} else if (InternalOrder.isCountDesc(this.order)) {
|
} else if (InternalOrder.isCountDesc(this.order)) {
|
||||||
if (terms.getDocCountError() > 0) {
|
if (terms.getDocCountError() > 0) {
|
||||||
@ -254,7 +248,7 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
|
|||||||
} else {
|
} else {
|
||||||
// otherwise use the doc count of the last term in the
|
// otherwise use the doc count of the last term in the
|
||||||
// aggregation
|
// aggregation
|
||||||
thisAggDocCountError = terms.getBucketsInternal().get(terms.getBucketsInternal().size() - 1).docCount;
|
thisAggDocCountError = terms.getBuckets().get(terms.getBuckets().size() - 1).docCount;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
thisAggDocCountError = -1;
|
thisAggDocCountError = -1;
|
||||||
@ -267,7 +261,7 @@ public abstract class InternalTerms<A extends InternalTerms<A, B>, B extends Int
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
setDocCountError(thisAggDocCountError);
|
setDocCountError(thisAggDocCountError);
|
||||||
for (B bucket : terms.getBucketsInternal()) {
|
for (B bucket : terms.getBuckets()) {
|
||||||
// If there is already a doc count error for this bucket
|
// If there is already a doc count error for this bucket
|
||||||
// subtract this aggs doc count error from it to make the
|
// subtract this aggs doc count error from it to make the
|
||||||
// new value for the bucket. This then means that when the
|
// new value for the bucket. This then means that when the
|
||||||
|
@@ -76,7 +76,7 @@ public class LongTerms extends InternalMappedTerms<LongTerms, LongTerms.Bucket>
         }
 
         @Override
-        int compareTerm(Terms.Bucket other) {
+        public int compareTerm(Terms.Bucket other) {
             return Long.compare(term, ((Number) other.getKey()).longValue());
         }
 
@@ -161,7 +161,7 @@ public class LongTerms extends InternalMappedTerms<LongTerms, LongTerms.Bucket>
      * Converts a {@link LongTerms} into a {@link DoubleTerms}, returning the value of the specified long terms as doubles.
      */
     static DoubleTerms convertLongTermsToDouble(LongTerms longTerms, DocValueFormat decimalFormat) {
-        List<Terms.Bucket> buckets = longTerms.getBuckets();
+        List<LongTerms.Bucket> buckets = longTerms.getBuckets();
         List<DoubleTerms.Bucket> newBuckets = new ArrayList<>();
         for (Terms.Bucket bucket : buckets) {
             newBuckets.add(new DoubleTerms.Bucket(bucket.getKeyAsNumber().doubleValue(),
@@ -75,7 +75,7 @@ public class StringTerms extends InternalMappedTerms<StringTerms, StringTerms.Bu
         }
 
         @Override
-        int compareTerm(Terms.Bucket other) {
+        public int compareTerm(Terms.Bucket other) {
             return termBytes.compareTo(((Bucket) other).termBytes);
         }
 
@@ -20,7 +20,6 @@ package org.elasticsearch.search.aggregations.bucket.terms;
 
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.search.aggregations.Aggregator;
-import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
 
 import java.util.Arrays;
@@ -33,50 +32,23 @@ import java.util.List;
  */
 public interface Terms extends MultiBucketsAggregation {
 
-    enum ValueType {
-
-        STRING(org.elasticsearch.search.aggregations.support.ValueType.STRING),
-        LONG(org.elasticsearch.search.aggregations.support.ValueType.LONG),
-        DOUBLE(org.elasticsearch.search.aggregations.support.ValueType.DOUBLE);
-
-        final org.elasticsearch.search.aggregations.support.ValueType scriptValueType;
-
-        ValueType(org.elasticsearch.search.aggregations.support.ValueType scriptValueType) {
-            this.scriptValueType = scriptValueType;
-        }
-
-        static ValueType resolveType(String type) {
-            if ("string".equals(type)) {
-                return STRING;
-            }
-            if ("double".equals(type) || "float".equals(type)) {
-                return DOUBLE;
-            }
-            if ("long".equals(type) || "integer".equals(type) || "short".equals(type) || "byte".equals(type)) {
-                return LONG;
-            }
-            return null;
-        }
-    }
-
     /**
      * A bucket that is associated with a single term
      */
-    abstract class Bucket extends InternalMultiBucketAggregation.InternalBucket {
+    interface Bucket extends MultiBucketsAggregation.Bucket {
 
-        public abstract Number getKeyAsNumber();
+        Number getKeyAsNumber();
 
-        abstract int compareTerm(Terms.Bucket other);
+        int compareTerm(Terms.Bucket other);
 
-        public abstract long getDocCountError();
-
+        long getDocCountError();
     }
 
     /**
      * Return the sorted list of the buckets in this terms aggregation.
      */
     @Override
-    List<Bucket> getBuckets();
+    List<? extends Bucket> getBuckets();
 
     /**
      * Get the bucket for the given term, or null if there is no such bucket.
@@ -127,7 +127,7 @@ public class UnmappedTerms extends InternalTerms<UnmappedTerms, UnmappedTerms.Bu
     }
 
     @Override
-    protected List<Bucket> getBucketsInternal() {
+    public List<Bucket> getBuckets() {
         return emptyList();
     }
 
@ -53,20 +53,16 @@ public enum PercentilesMethod implements Writeable {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public static PercentilesMethod readFromStream(StreamInput in) throws IOException {
|
public static PercentilesMethod readFromStream(StreamInput in) throws IOException {
|
||||||
int ordinal = in.readVInt();
|
return in.readEnum(PercentilesMethod.class);
|
||||||
if (ordinal < 0 || ordinal >= values().length) {
|
|
||||||
throw new IOException("Unknown PercentilesMethod ordinal [" + ordinal + "]");
|
|
||||||
}
|
|
||||||
return values()[ordinal];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
out.writeVInt(ordinal());
|
out.writeEnum(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
return parseField.getPreferredName();
|
return parseField.getPreferredName();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
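The PercentilesMethod hunk above is the first of many identical conversions in this commit: hand-rolled ordinal handling in readFromStream/writeTo is replaced by StreamInput#readEnum and StreamOutput#writeEnum, which keep the ordinal write and the bounds check in one place (the BytesStreamsTests hunk further down exercises exactly that behaviour). A self-contained sketch of the equivalent round trip, using plain java.io streams as a stand-in for Elasticsearch's stream classes; simplified in that it writes a fixed-width int where the real helpers use a variable-length int:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class EnumStreamSketch {
        enum PercentilesMethod { TDIGEST, HDR }

        // Write the enum as its ordinal (the pattern writeEnum(this) replaces by hand).
        static void writeEnum(DataOutputStream out, Enum<?> value) throws IOException {
            out.writeInt(value.ordinal());
        }

        // Read the ordinal back and validate it, mirroring readEnum(Class) and the
        // "Unknown ... ordinal" check that the hunks above delete from each enum.
        static <E extends Enum<E>> E readEnum(DataInputStream in, Class<E> clazz) throws IOException {
            int ordinal = in.readInt();
            E[] values = clazz.getEnumConstants();
            if (ordinal < 0 || ordinal >= values.length) {
                throw new IOException("Unknown " + clazz.getSimpleName() + " ordinal [" + ordinal + "]");
            }
            return values[ordinal];
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            writeEnum(new DataOutputStream(bytes), PercentilesMethod.HDR);
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            System.out.println(readEnum(in, PercentilesMethod.class)); // prints HDR
        }
    }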
@@ -180,7 +180,7 @@ public final class InnerHitsContext {
                     // Only include docs that have the current hit as parent
                     .add(hitQuery, Occur.FILTER)
                     // Only include docs that have this inner hits type
-                    .add(documentMapper.typeFilter(), Occur.FILTER)
+                    .add(documentMapper.typeFilter(context.getQueryShardContext()), Occur.FILTER)
                     .build();
             if (size() == 0) {
                 final int count = context.searcher().count(q);
@ -504,16 +504,12 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
|
|||||||
NONE, SCORE;
|
NONE, SCORE;
|
||||||
|
|
||||||
public static Order readFromStream(StreamInput in) throws IOException {
|
public static Order readFromStream(StreamInput in) throws IOException {
|
||||||
int ordinal = in.readVInt();
|
return in.readEnum(Order.class);
|
||||||
if (ordinal < 0 || ordinal >= values().length) {
|
|
||||||
throw new IOException("Unknown Order ordinal [" + ordinal + "]");
|
|
||||||
}
|
|
||||||
return values()[ordinal];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
out.writeVInt(this.ordinal());
|
out.writeEnum(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
public static Order fromString(String order) {
|
public static Order fromString(String order) {
|
||||||
@@ -533,16 +529,12 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
         CHARS, WORD, SENTENCE;
 
         public static BoundaryScannerType readFromStream(StreamInput in) throws IOException {
-            int ordinal = in.readVInt();
-            if (ordinal < 0 || ordinal >= values().length) {
-                throw new IOException("Unknown BoundaryScannerType ordinal [" + ordinal + "]");
-            }
-            return values()[ordinal];
+            return in.readEnum(BoundaryScannerType.class);
        }
 
         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            out.writeVInt(this.ordinal());
+            out.writeEnum(this);
         }
 
         public static BoundaryScannerType fromString(String boundaryScannerType) {
@@ -44,6 +44,8 @@ public final class AliasFilter implements Writeable {
     private final QueryBuilder filter;
     private final boolean reparseAliases;
 
+    public static final AliasFilter EMPTY = new AliasFilter(null, Strings.EMPTY_ARRAY);
+
     public AliasFilter(QueryBuilder filter, String... aliases) {
         this.aliases = aliases == null ? Strings.EMPTY_ARRAY : aliases;
         this.filter = filter;
@@ -86,16 +86,12 @@ public enum QueryRescoreMode implements Writeable {
     public abstract float combine(float primary, float secondary);
 
     public static QueryRescoreMode readFromStream(StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown ScoreMode ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(QueryRescoreMode.class);
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(this.ordinal());
+        out.writeEnum(this);
     }
 
     public static QueryRescoreMode fromString(String scoreMode) {
@@ -111,4 +107,4 @@ public enum QueryRescoreMode implements Writeable {
     public String toString() {
         return name().toLowerCase(Locale.ROOT);
     }
 }
@@ -350,18 +350,14 @@ public class ScriptSortBuilder extends SortBuilder<ScriptSortBuilder> {
 
         @Override
         public void writeTo(final StreamOutput out) throws IOException {
-            out.writeVInt(ordinal());
+            out.writeEnum(this);
         }
 
         /**
          * Read from a stream.
          */
         static ScriptSortType readFromStream(final StreamInput in) throws IOException {
-            int ordinal = in.readVInt();
-            if (ordinal < 0 || ordinal >= values().length) {
-                throw new IOException("Unknown ScriptSortType ordinal [" + ordinal + "]");
-            }
-            return values()[ordinal];
+            return in.readEnum(ScriptSortType.class);
         }
 
         public static ScriptSortType fromString(final String str) {
@@ -52,15 +52,11 @@ public enum SortMode implements Writeable {
 
     @Override
     public void writeTo(final StreamOutput out) throws IOException {
-        out.writeVInt(ordinal());
+        out.writeEnum(this);
     }
 
     public static SortMode readFromStream(StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown SortMode ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(SortMode.class);
     }
 
     public static SortMode fromString(final String str) {
@@ -85,4 +81,4 @@ public enum SortMode implements Writeable {
     public String toString() {
         return name().toLowerCase(Locale.ROOT);
     }
 }
@@ -52,16 +52,12 @@ public enum SortOrder implements Writeable {
     };
 
     static SortOrder readFromStream(StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown SortOrder ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(SortOrder.class);
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(this.ordinal());
+        out.writeEnum(this);
     }
 
     public static SortOrder fromString(String op) {
@@ -38,15 +38,11 @@ public enum SortBy implements Writeable {
 
     @Override
     public void writeTo(final StreamOutput out) throws IOException {
-        out.writeVInt(ordinal());
+        out.writeEnum(this);
     }
 
     public static SortBy readFromStream(final StreamInput in) throws IOException {
-        int ordinal = in.readVInt();
-        if (ordinal < 0 || ordinal >= values().length) {
-            throw new IOException("Unknown SortBy ordinal [" + ordinal + "]");
-        }
-        return values()[ordinal];
+        return in.readEnum(SortBy.class);
     }
 
     public static SortBy resolve(final String str) {
@@ -511,15 +511,11 @@ public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuild
 
         @Override
         public void writeTo(final StreamOutput out) throws IOException {
-            out.writeVInt(ordinal());
+            out.writeEnum(this);
         }
 
         public static SuggestMode readFromStream(final StreamInput in) throws IOException {
-            int ordinal = in.readVInt();
-            if (ordinal < 0 || ordinal >= values().length) {
-                throw new IOException("Unknown SuggestMode ordinal [" + ordinal + "]");
-            }
-            return values()[ordinal];
+            return in.readEnum(SuggestMode.class);
         }
 
         public static SuggestMode resolve(final String str) {
@@ -571,15 +567,11 @@ public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuild
 
         @Override
         public void writeTo(final StreamOutput out) throws IOException {
-            out.writeVInt(ordinal());
+            out.writeEnum(this);
         }
 
         public static StringDistanceImpl readFromStream(final StreamInput in) throws IOException {
-            int ordinal = in.readVInt();
-            if (ordinal < 0 || ordinal >= values().length) {
-                throw new IOException("Unknown StringDistanceImpl ordinal [" + ordinal + "]");
-            }
-            return values()[ordinal];
+            return in.readEnum(StringDistanceImpl.class);
         }
 
         public static StringDistanceImpl resolve(final String str) {
@@ -159,4 +159,8 @@ public abstract class RemoteClusterAware extends AbstractComponent {
             throw new IllegalArgumentException("port must be a number", e);
         }
     }
+
+    public static final String buildRemoteIndexName(String clusterAlias, String indexName) {
+        return clusterAlias + REMOTE_CLUSTER_INDEX_SEPARATOR + indexName;
+    }
 }
@@ -152,7 +152,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
     /**
      * Fetches all shards for the search request from this remote connection. This is used to later run the search on the remote end.
      */
-    public void fetchSearchShards(SearchRequest searchRequest, final String[] indices,
+    public void fetchSearchShards(ClusterSearchShardsRequest searchRequest,
                                   ActionListener<ClusterSearchShardsResponse> listener) {
         if (connectedNodes.isEmpty()) {
             // just in case if we are not connected for some reason we try to connect and if we fail we have to notify the listener
@@ -160,18 +160,15 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
             // we can't proceed with a search on a cluster level.
             // in the future we might want to just skip the remote nodes in such a case but that can already be implemented on the caller
             // end since they provide the listener.
-            connectHandler.connect(ActionListener.wrap((x) -> fetchShardsInternal(searchRequest, indices, listener), listener::onFailure));
+            connectHandler.connect(ActionListener.wrap((x) -> fetchShardsInternal(searchRequest, listener), listener::onFailure));
         } else {
-            fetchShardsInternal(searchRequest, indices, listener);
+            fetchShardsInternal(searchRequest, listener);
         }
     }
 
-    private void fetchShardsInternal(SearchRequest searchRequest, String[] indices,
+    private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest,
                                      final ActionListener<ClusterSearchShardsResponse> listener) {
         final DiscoveryNode node = nodeSupplier.get();
-        ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices)
-            .indicesOptions(searchRequest.indicesOptions()).local(true).preference(searchRequest.preference())
-            .routing(searchRequest.routing());
         transportService.sendRequest(node, ClusterSearchShardsAction.NAME, searchShardsRequest,
             new TransportResponseHandler<ClusterSearchShardsResponse>() {
 
@@ -224,7 +221,13 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
         };
     }
 
+    Transport.Connection getConnection() {
+        DiscoveryNode discoveryNode = nodeSupplier.get();
+        return transportService.getConnection(discoveryNode);
+    }
+
     @Override
     public void close() throws IOException {
         connectHandler.close();
     }
@@ -24,10 +24,12 @@ import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
 import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchShardIterator;
 import org.elasticsearch.action.support.GroupedActionListener;
+import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.Booleans;
@@ -176,6 +178,25 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
         return remoteClusters.get(remoteCluster).isNodeConnected(node);
     }
 
+    public Map<String, OriginalIndices> groupIndices(IndicesOptions indicesOptions, String[] indices, Predicate<String> indexExists) {
+        Map<String, OriginalIndices> originalIndicesMap = new HashMap<>();
+        if (isCrossClusterSearchEnabled()) {
+            final Map<String, List<String>> groupedIndices = groupClusterIndices(indices, indexExists);
+            for (Map.Entry<String, List<String>> entry : groupedIndices.entrySet()) {
+                String clusterAlias = entry.getKey();
+                List<String> originalIndices = entry.getValue();
+                originalIndicesMap.put(clusterAlias,
+                    new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), indicesOptions));
+            }
+            if (originalIndicesMap.containsKey(LOCAL_CLUSTER_GROUP_KEY) == false) {
+                originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions));
+            }
+        } else {
+            originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(indices, indicesOptions));
+        }
+        return originalIndicesMap;
+    }
+
     /**
      * Returns <code>true</code> iff the given cluster is configured as a remote cluster. Otherwise <code>false</code>
      */
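The new groupIndices method above buckets the requested index expressions into one OriginalIndices group per remote cluster alias plus a local group. A simplified, self-contained sketch of that partitioning idea; the ':' separator is taken from remote index names such as "test_cluster_1:foo" used in the test hunks below, and the predicate here is only a stand-in for the real groupClusterIndices/indexExists logic, not the actual RemoteClusterService code:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Predicate;

    final class GroupIndicesSketch {
        static final String LOCAL = ""; // stand-in for LOCAL_CLUSTER_GROUP_KEY

        // Split expressions like "cluster_alias:index" into per-cluster lists;
        // anything without a known alias prefix stays in the local group.
        static Map<String, List<String>> groupIndices(String[] indices, Predicate<String> isRemoteClusterAlias) {
            Map<String, List<String>> grouped = new HashMap<>();
            for (String index : indices) {
                int sep = index.indexOf(':');
                String alias = sep >= 0 ? index.substring(0, sep) : null;
                if (alias != null && isRemoteClusterAlias.test(alias)) {
                    grouped.computeIfAbsent(alias, k -> new ArrayList<>()).add(index.substring(sep + 1));
                } else {
                    grouped.computeIfAbsent(LOCAL, k -> new ArrayList<>()).add(index);
                }
            }
            grouped.computeIfAbsent(LOCAL, k -> new ArrayList<>()); // local group is always present
            return grouped;
        }

        public static void main(String[] args) {
            Map<String, List<String>> grouped = groupIndices(
                new String[]{"logs-*", "test_cluster_1:foo", "test_cluster_2:xyz"},
                alias -> alias.startsWith("test_cluster"));
            // e.g. {=[logs-*], test_cluster_1=[foo], test_cluster_2=[xyz]} (map order may vary)
            System.out.println(grouped);
        }
    }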
@@ -183,8 +204,9 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
         return remoteClusters.containsKey(clusterName);
     }
 
-    public void collectSearchShards(SearchRequest searchRequest, Map<String, OriginalIndices> remoteIndicesByCluster,
-                                    ActionListener<Map<String, ClusterSearchShardsResponse>> listener) {
+    public void collectSearchShards(IndicesOptions indicesOptions, String preference, String routing,
+                                    Map<String, OriginalIndices> remoteIndicesByCluster,
+                                    ActionListener<Map<String, ClusterSearchShardsResponse>> listener) {
         final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size());
         final Map<String, ClusterSearchShardsResponse> searchShardsResponses = new ConcurrentHashMap<>();
         final AtomicReference<TransportException> transportException = new AtomicReference<>();
@@ -195,7 +217,10 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
                 throw new IllegalArgumentException("no such remote cluster: " + clusterName);
             }
             final String[] indices = entry.getValue().indices();
-            remoteClusterConnection.fetchSearchShards(searchRequest, indices,
+            ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices)
+                .indicesOptions(indicesOptions).local(true).preference(preference)
+                .routing(routing);
+            remoteClusterConnection.fetchSearchShards(searchShardsRequest,
                 new ActionListener<ClusterSearchShardsResponse>() {
                     @Override
                     public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) {
@@ -240,6 +265,14 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
         return connection.getConnection(node);
     }
 
+    public Transport.Connection getConnection(String cluster) {
+        RemoteClusterConnection connection = remoteClusters.get(cluster);
+        if (connection == null) {
+            throw new IllegalArgumentException("no such remote cluster: " + cluster);
+        }
+        return connection.getConnection();
+    }
+
     @Override
     protected Set<String> getRemoteClusterNames() {
         return this.remoteClusters.keySet();
@@ -120,6 +120,7 @@ grant {
   permission java.io.FilePermission "/proc/sys/vm/max_map_count", "read";
 
   // io stats on Linux
+  permission java.io.FilePermission "/proc/self/mountinfo", "read";
   permission java.io.FilePermission "/proc/diskstats", "read";
 
   // control group stats on Linux
@@ -183,7 +183,6 @@ public class NodeStatsTests extends ESTestCase {
                 assertEquals(fs.getTotal().getFree(), deserializedFs.getTotal().getFree());
                 assertEquals(fs.getTotal().getMount(), deserializedFs.getTotal().getMount());
                 assertEquals(fs.getTotal().getPath(), deserializedFs.getTotal().getPath());
-                assertEquals(fs.getTotal().getSpins(), deserializedFs.getTotal().getSpins());
                 assertEquals(fs.getTotal().getType(), deserializedFs.getTotal().getType());
                 FsInfo.IoStats ioStats = fs.getIoStats();
                 FsInfo.IoStats deserializedIoStats = deserializedFs.getIoStats();
@@ -64,7 +64,6 @@ public class TransportSearchActionTests extends ESTestCase {
         ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
     }
 
-
     public void testMergeShardsIterators() throws IOException {
         List<ShardIterator> localShardIterators = new ArrayList<>();
         {
@@ -159,7 +158,8 @@ public class TransportSearchActionTests extends ESTestCase {
             new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT)
         };
         Map<String, AliasFilter> indicesAndAliases = new HashMap<>();
-        indicesAndAliases.put("foo", new AliasFilter(new TermsQueryBuilder("foo", "bar"), Strings.EMPTY_ARRAY));
+        indicesAndAliases.put("foo", new AliasFilter(new TermsQueryBuilder("foo", "bar"), "some_alias_for_foo",
+            "some_other_foo_alias"));
         indicesAndAliases.put("bar", new AliasFilter(new MatchAllQueryBuilder(), Strings.EMPTY_ARRAY));
         ClusterSearchShardsGroup[] groups = new ClusterSearchShardsGroup[] {
             new ClusterSearchShardsGroup(new ShardId("foo", "foo_id", 0),
@@ -180,7 +180,9 @@ public class TransportSearchActionTests extends ESTestCase {
             new ClusterSearchShardsGroup(new ShardId("xyz", "xyz_id", 0),
                 new ShardRouting[] {TestShardRouting.newShardRouting("xyz", 0, "node3", true, ShardRoutingState.STARTED)})
         };
-        searchShardsResponseMap.put("test_cluster_2", new ClusterSearchShardsResponse(groups2, nodes2, null));
+        Map<String, AliasFilter> filter = new HashMap<>();
+        filter.put("xyz", new AliasFilter(null, "some_alias_for_xyz"));
+        searchShardsResponseMap.put("test_cluster_2", new ClusterSearchShardsResponse(groups2, nodes2, filter));
 
         Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>();
         remoteIndicesByCluster.put("test_cluster_1",
@@ -193,7 +195,8 @@ public class TransportSearchActionTests extends ESTestCase {
         assertEquals(4, iteratorList.size());
         for (SearchShardIterator iterator : iteratorList) {
             if (iterator.shardId().getIndexName().endsWith("foo")) {
-                assertArrayEquals(new String[]{"fo*", "ba*"}, iterator.getOriginalIndices().indices());
+                assertArrayEquals(new String[]{"some_alias_for_foo", "some_other_foo_alias"},
+                    iterator.getOriginalIndices().indices());
                 assertTrue(iterator.shardId().getId() == 0 || iterator.shardId().getId() == 1);
                 assertEquals("test_cluster_1:foo", iterator.shardId().getIndexName());
                 ShardRouting shardRouting = iterator.nextOrNull();
@@ -204,7 +207,7 @@ public class TransportSearchActionTests extends ESTestCase {
                 assertEquals(shardRouting.getIndexName(), "foo");
                 assertNull(iterator.nextOrNull());
             } else if (iterator.shardId().getIndexName().endsWith("bar")) {
-                assertArrayEquals(new String[]{"fo*", "ba*"}, iterator.getOriginalIndices().indices());
+                assertArrayEquals(new String[]{"bar"}, iterator.getOriginalIndices().indices());
                 assertEquals(0, iterator.shardId().getId());
                 assertEquals("test_cluster_1:bar", iterator.shardId().getIndexName());
                 ShardRouting shardRouting = iterator.nextOrNull();
@@ -215,7 +218,7 @@ public class TransportSearchActionTests extends ESTestCase {
                 assertEquals(shardRouting.getIndexName(), "bar");
                 assertNull(iterator.nextOrNull());
             } else if (iterator.shardId().getIndexName().endsWith("xyz")) {
-                assertArrayEquals(new String[]{"x*"}, iterator.getOriginalIndices().indices());
+                assertArrayEquals(new String[]{"some_alias_for_xyz"}, iterator.getOriginalIndices().indices());
                 assertEquals(0, iterator.shardId().getId());
                 assertEquals("test_cluster_2:xyz", iterator.shardId().getIndexName());
                 ShardRouting shardRouting = iterator.nextOrNull();
@@ -57,6 +57,7 @@ import java.util.Map;
 import java.util.function.Supplier;
 
 public class ClusterModuleTests extends ModuleTestCase {
+    private ClusterInfoService clusterInfoService = EmptyClusterInfoService.INSTANCE;
     private ClusterService clusterService = new ClusterService(Settings.EMPTY,
         new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null);
     static class FakeAllocationDecider extends AllocationDecider {
@@ -114,7 +115,7 @@ public class ClusterModuleTests extends ModuleTestCase {
             public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
                 return Collections.singletonList(new EnableAllocationDecider(settings, clusterSettings));
             }
-        })));
+        }), clusterInfoService));
         assertEquals(e.getMessage(),
             "Cannot specify allocation decider [" + EnableAllocationDecider.class.getName() + "] twice");
     }
@@ -126,8 +127,8 @@ public class ClusterModuleTests extends ModuleTestCase {
             public Collection<AllocationDecider> createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
                 return Collections.singletonList(new FakeAllocationDecider(settings));
             }
-        }));
-        assertTrue(module.allocationDeciders.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class)));
+        }), clusterInfoService);
+        assertTrue(module.deciderList.stream().anyMatch(d -> d.getClass().equals(FakeAllocationDecider.class)));
     }
 
     private ClusterModule newClusterModuleWithShardsAllocator(Settings settings, String name, Supplier<ShardsAllocator> supplier) {
@@ -138,7 +139,7 @@ public class ClusterModuleTests extends ModuleTestCase {
                 return Collections.singletonMap(name, supplier);
             }
         }
-        ));
+        ), clusterInfoService);
     }
 
     public void testRegisterShardsAllocator() {
@@ -156,7 +157,7 @@ public class ClusterModuleTests extends ModuleTestCase {
     public void testUnknownShardsAllocator() {
         Settings settings = Settings.builder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), "dne").build();
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
-            new ClusterModule(settings, clusterService, Collections.emptyList()));
+            new ClusterModule(settings, clusterService, Collections.emptyList(), clusterInfoService));
         assertEquals("Unknown ShardsAllocator [dne]", e.getMessage());
     }
 
@@ -33,6 +33,7 @@ import org.elasticsearch.test.ESTestCase;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.function.Predicate;
 
 import static org.elasticsearch.common.util.set.Sets.newHashSet;
 import static org.hamcrest.Matchers.arrayContaining;
@@ -956,4 +957,17 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
         strings = indexNameExpressionResolver.filteringAliases(state, "test-0", "test-*,alias-*");
         assertNull(strings);
     }
+
+    public void testIndexAliases() {
+        MetaData.Builder mdBuilder = MetaData.builder()
+            .put(indexBuilder("test-0").state(State.OPEN)
+                .putAlias(AliasMetaData.builder("test-alias-0").filter("{ \"term\": \"foo\"}"))
+                .putAlias(AliasMetaData.builder("test-alias-1").filter("{ \"term\": \"foo\"}"))
+                .putAlias(AliasMetaData.builder("test-alias-non-filtering"))
+            );
+        ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
+        String[] strings = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, true, "test-*");
+        Arrays.sort(strings);
+        assertArrayEquals(new String[] {"test-alias-0", "test-alias-1", "test-alias-non-filtering"}, strings);
+    }
 }
@@ -391,7 +391,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
     private class NoopGatewayAllocator extends GatewayAllocator {
 
         NoopGatewayAllocator() {
-            super(Settings.EMPTY, null, null);
+            super(Settings.EMPTY);
         }
 
         @Override
@@ -812,4 +812,34 @@ public class BytesStreamsTests extends ESTestCase {
         StreamInput in = new BytesArray(Base64.getDecoder().decode("////////////AQAAAAAAAA==")).streamInput();
         assertEquals(-1, in.readVLong());
     }
+
+    public enum TestEnum {
+        ONE,
+        TWO,
+        THREE
+    }
+
+    public void testEnum() throws IOException {
+        TestEnum value = randomFrom(TestEnum.values());
+        BytesStreamOutput output = new BytesStreamOutput();
+        output.writeEnum(value);
+        StreamInput input = output.bytes().streamInput();
+        assertEquals(value, input.readEnum(TestEnum.class));
+        assertEquals(0, input.available());
+    }
+
+    public void testInvalidEnum() throws IOException {
+        BytesStreamOutput output = new BytesStreamOutput();
+        int randomNumber = randomInt();
+        boolean validEnum = randomNumber >= 0 && randomNumber < TestEnum.values().length;
+        output.writeVInt(randomNumber);
+        StreamInput input = output.bytes().streamInput();
+        if (validEnum) {
+            assertEquals(TestEnum.values()[randomNumber], input.readEnum(TestEnum.class));
+        } else {
+            IOException ex = expectThrows(IOException.class, () -> input.readEnum(TestEnum.class));
+            assertEquals("Unknown TestEnum ordinal [" + randomNumber + "]", ex.getMessage());
+        }
+        assertEquals(0, input.available());
+    }
 }
@@ -20,6 +20,7 @@ package org.elasticsearch.discovery;
 
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.Version;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.service.ClusterApplier;
 import org.elasticsearch.cluster.service.MasterService;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -71,7 +72,8 @@ public class DiscoveryModuleTests extends ESTestCase {
         default Map<String, Supplier<Discovery>> getDiscoveryTypes(ThreadPool threadPool, TransportService transportService,
                                                                    NamedWriteableRegistry namedWriteableRegistry,
                                                                    MasterService masterService, ClusterApplier clusterApplier,
-                                                                   ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider) {
+                                                                   ClusterSettings clusterSettings, UnicastHostsProvider hostsProvider,
+                                                                   AllocationService allocationService) {
             return impl();
         }
     }
@@ -93,7 +95,7 @@ public class DiscoveryModuleTests extends ESTestCase {
 
     private DiscoveryModule newModule(Settings settings, List<DiscoveryPlugin> plugins) {
         return new DiscoveryModule(settings, threadPool, transportService, namedWriteableRegistry, null, masterService,
-            clusterApplier, clusterSettings, plugins);
+            clusterApplier, clusterSettings, plugins, null);
     }
 
     public void testDefaults() {
@@ -299,7 +299,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
         ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()),
             masterService, (source, clusterStateSupplier, listener) -> listener.clusterStateProcessed(source, clusterStateSupplier.get(), clusterStateSupplier.get()),
-            clusterSettings, Collections::emptyList);
+            clusterSettings, Collections::emptyList, null);
         zenDiscovery.start();
         return zenDiscovery;
     }
@@ -238,31 +238,43 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
     }
 
     public void testRescheduleAsyncFsync() throws Exception {
-        Settings settings = Settings.builder()
-            .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "100ms") // very often :)
+        final Settings settings = Settings.builder()
+            .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "100ms")
             .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST)
             .build();
-        IndexService indexService = createIndex("test", settings);
+        final IndexService indexService = createIndex("test", settings);
         ensureGreen("test");
         assertNull(indexService.getFsyncTask());
-        IndexMetaData metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)).build();
-        indexService.updateMetaData(metaData);
-        assertNotNull(indexService.getFsyncTask());
-        assertTrue(indexService.getRefreshTask().mustReschedule());
-        client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
-        IndexShard shard = indexService.getShard(0);
-        assertBusy(() -> {
-            assertFalse(shard.getTranslog().syncNeeded());
-        });
 
-        metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST)).build();
-        indexService.updateMetaData(metaData);
+        client()
+            .admin()
+            .indices()
+            .prepareUpdateSettings("test")
+            .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC))
+            .get();
+
+        assertNotNull(indexService.getFsyncTask());
+        assertTrue(indexService.getFsyncTask().mustReschedule());
+        client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
+        assertNotNull(indexService.getFsyncTask());
+        final IndexShard shard = indexService.getShard(0);
+        assertBusy(() -> assertFalse(shard.getTranslog().syncNeeded()));
+
+        client()
+            .admin()
+            .indices()
+            .prepareUpdateSettings("test")
+            .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST))
+            .get();
         assertNull(indexService.getFsyncTask());
 
-        metaData = IndexMetaData.builder(indexService.getMetaData()).settings(Settings.builder().put(indexService.getMetaData().getSettings()).put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC)).build();
-        indexService.updateMetaData(metaData);
+        client()
+            .admin()
+            .indices()
+            .prepareUpdateSettings("test")
+            .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC))
+            .get();
         assertNotNull(indexService.getFsyncTask());
 
     }
 
     public void testIllegalFsyncInterval() {
@@ -99,7 +99,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase {
                 .bytes(),
             XContentType.JSON));
 
-        assertFieldNames(set("a", "a.keyword", "b", "b.c", "_uid", "_type", "_version", "_seq_no", "_primary_term", "_source"), doc);
+        assertFieldNames(set("a", "a.keyword", "b", "b.c", "_uid", "_version", "_seq_no", "_primary_term", "_source"), doc);
     }
 
     public void testExplicitEnabled() throws Exception {
@@ -117,7 +117,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase {
                 .bytes(),
             XContentType.JSON));
 
-        assertFieldNames(set("field", "field.keyword", "_uid", "_type", "_version", "_seq_no", "_primary_term", "_source"), doc);
+        assertFieldNames(set("field", "field.keyword", "_uid", "_version", "_seq_no", "_primary_term", "_source"), doc);
     }
 
     public void testDisabled() throws Exception {
@@ -290,13 +290,13 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
 
     public void testIndexSortWithNestedFields() throws IOException {
         Settings settings = Settings.builder()
-            .put("index.sort.field", "_type")
+            .put("index.sort.field", "foo")
             .build();
         IllegalArgumentException invalidNestedException = expectThrows(IllegalArgumentException.class,
-           () -> createIndex("test", settings, "t", "nested_field", "type=nested"));
+           () -> createIndex("test", settings, "t", "nested_field", "type=nested", "foo", "type=keyword"));
         assertThat(invalidNestedException.getMessage(),
             containsString("cannot have nested fields when index sort is activated"));
-        IndexService indexService = createIndex("test", settings, "t");
+        IndexService indexService = createIndex("test", settings, "t", "foo", "type=keyword");
         CompressedXContent nestedFieldMapping = new CompressedXContent(XContentFactory.jsonBuilder().startObject()
                 .startObject("properties")
                     .startObject("nested_field")
@@ -310,7 +310,6 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
             containsString("cannot have nested fields when index sort is activated"));
     }
 
-    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/pull/24317#issuecomment-297624290")
     public void testForbidMultipleTypes() throws IOException {
         String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
         MapperService mapperService = createIndex("test").mapperService();
@@ -18,14 +18,17 @@
  */
 package org.elasticsearch.index.mapper;
 
+import org.apache.lucene.document.InetAddressPoint;
 import org.apache.lucene.index.IndexableField;
 import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.network.InetAddresses;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
 
 import java.io.IOException;
+import java.net.InetAddress;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.Locale;
@@ -40,6 +43,8 @@ import static org.hamcrest.Matchers.containsString;
 public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase {
     private static String FROM_DATE = "2016-10-31";
     private static String TO_DATE = "2016-11-01 20:00:00";
+    private static String FROM_IP = "::ffff:c0a8:107";
+    private static String TO_IP = "2001:db8::";
     private static int FROM = 5;
     private static String FROM_STR = FROM + "";
     private static int TO = 10;
@@ -48,12 +53,14 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase {
 
     @Override
     protected void setTypeList() {
-        TYPES = new HashSet<>(Arrays.asList("date_range", "float_range", "double_range", "integer_range", "long_range"));
+        TYPES = new HashSet<>(Arrays.asList("date_range", "ip_range", "float_range", "double_range", "integer_range", "long_range"));
     }
 
     private Object getFrom(String type) {
         if (type.equals("date_range")) {
             return FROM_DATE;
+        } else if (type.equals("ip_range")) {
+            return FROM_IP;
         }
         return random().nextBoolean() ? FROM : FROM_STR;
     }
@@ -69,13 +76,17 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase {
     private Object getTo(String type) {
         if (type.equals("date_range")) {
             return TO_DATE;
+        } else if (type.equals("ip_range")) {
+            return TO_IP;
         }
         return random().nextBoolean() ? TO : TO_STR;
     }
 
-    private Number getMax(String type) {
+    private Object getMax(String type) {
         if (type.equals("date_range") || type.equals("long_range")) {
             return Long.MAX_VALUE;
+        } else if (type.equals("ip_range")) {
+            return InetAddressPoint.MAX_VALUE;
         } else if (type.equals("integer_range")) {
             return Integer.MAX_VALUE;
         } else if (type.equals("float_range")) {
@@ -189,7 +200,14 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase {
         assertEquals(2, pointField.fieldType().pointDimensionCount());
         IndexableField storedField = fields[1];
         assertTrue(storedField.fieldType().stored());
-        assertThat(storedField.stringValue(), containsString(type.equals("date_range") ? "1477872000000" : "5"));
+        String strVal = "5";
+        if (type.equals("date_range")) {
+            strVal = "1477872000000";
+        } else if (type.equals("ip_range")) {
+            strVal = InetAddresses.toAddrString(InetAddresses.forString("192.168.1.7")) + " : "
+                + InetAddresses.toAddrString(InetAddresses.forString("2001:db8:0:0:0:0:0:0"));
+        }
+        assertThat(storedField.stringValue(), containsString(strVal));
     }
 
     @Override
@@ -234,7 +252,8 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase {
                 .endObject().bytes(),
                 XContentType.JSON));
         MapperParsingException e = expectThrows(MapperParsingException.class, runnable);
-        assertThat(e.getCause().getMessage(), anyOf(containsString("passed as String"), containsString("failed to parse date")));
+        assertThat(e.getCause().getMessage(), anyOf(containsString("passed as String"),
+            containsString("failed to parse date"), containsString("is not an IP string literal")));
     }
 
     @Override
@@ -261,7 +280,8 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase {
         assertEquals(2, doc.rootDoc().getFields("field").length);
         IndexableField[] fields = doc.rootDoc().getFields("field");
         IndexableField storedField = fields[1];
-        assertThat(storedField.stringValue(), containsString(type.equals("date_range") ? Long.MAX_VALUE+"" : getMax(type)+""));
+        String expected = type.equals("ip_range") ? InetAddresses.toAddrString((InetAddress)getMax(type)) : getMax(type) +"";
+        assertThat(storedField.stringValue(), containsString(expected));
 
         // test null max value
         doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder()
@@ -280,8 +300,14 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase {
         assertFalse(pointField.fieldType().stored());
         storedField = fields[1];
         assertTrue(storedField.fieldType().stored());
-        assertThat(storedField.stringValue(), containsString(type.equals("date_range") ? "1477872000000" : "5"));
-        assertThat(storedField.stringValue(), containsString(getMax(type) + ""));
+        String strVal = "5";
+        if (type.equals("date_range")) {
+            strVal = "1477872000000";
+        } else if (type.equals("ip_range")) {
+            strVal = InetAddresses.toAddrString(InetAddresses.forString("192.168.1.7")) + " : "
+                + InetAddresses.toAddrString(InetAddresses.forString("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"));
+        }
+        assertThat(storedField.stringValue(), containsString(strVal));
     }
 
     public void testNoBounds() throws Exception {
@@ -316,8 +342,8 @@ public class RangeFieldMapperTests extends AbstractNumericFieldMapperTestCase {
         assertFalse(pointField.fieldType().stored());
         IndexableField storedField = fields[1];
         assertTrue(storedField.fieldType().stored());
-        assertThat(storedField.stringValue(), containsString(type.equals("date_range") ? Long.MAX_VALUE+"" : getMax(type)+""));
-        assertThat(storedField.stringValue(), containsString(getMax(type) + ""));
+        String expected = type.equals("ip_range") ? InetAddresses.toAddrString((InetAddress)getMax(type)) : getMax(type) +"";
+        assertThat(storedField.stringValue(), containsString(expected));
     }
 
     public void testIllegalArguments() throws Exception {
|
@@ -21,6 +21,8 @@ package org.elasticsearch.index.mapper;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import org.apache.lucene.document.DoubleRange;
 import org.apache.lucene.document.FloatRange;
+import org.apache.lucene.document.InetAddressPoint;
+import org.apache.lucene.document.InetAddressRange;
 import org.apache.lucene.document.IntRange;
 import org.apache.lucene.document.LongRange;
 import org.apache.lucene.index.IndexOptions;
@@ -37,6 +39,7 @@ import org.elasticsearch.test.IndexSettingsModule;
 import org.joda.time.DateTime;
 import org.junit.Before;
 
+import java.net.InetAddress;
 import java.util.Locale;
 
 public class RangeFieldTypeTests extends FieldTypeTestCase {
@@ -100,6 +103,8 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
                 return getLongRangeQuery(relation, (long)from, (long)to, includeLower, includeUpper);
             case DOUBLE:
                 return getDoubleRangeQuery(relation, (double)from, (double)to, includeLower, includeUpper);
+            case IP:
+                return getInetAddressRangeQuery(relation, (InetAddress)from, (InetAddress)to, includeLower, includeUpper);
             default:
                 return getFloatRangeQuery(relation, (float)from, (float)to, includeLower, includeUpper);
         }
@@ -142,7 +147,8 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
         return FloatRange.newIntersectsQuery(FIELDNAME, lower, upper);
     }
 
-    private Query getDoubleRangeQuery(ShapeRelation relation, double from, double to, boolean includeLower, boolean includeUpper) {
+    private Query getDoubleRangeQuery(ShapeRelation relation, double from, double to, boolean includeLower,
+                                      boolean includeUpper) {
         double[] lower = new double[] {includeLower ? from : Math.nextUp(from)};
         double[] upper = new double[] {includeUpper ? to : Math.nextDown(to)};
         if (relation == ShapeRelation.WITHIN) {
@@ -153,7 +159,19 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
         return DoubleRange.newIntersectsQuery(FIELDNAME, lower, upper);
     }
 
-    private Object nextFrom() {
+    private Query getInetAddressRangeQuery(ShapeRelation relation, InetAddress from, InetAddress to, boolean includeLower,
+                                           boolean includeUpper) {
+        InetAddress lower = includeLower ? from : InetAddressPoint.nextUp(from);
+        InetAddress upper = includeUpper ? to : InetAddressPoint.nextDown(to);
+        if (relation == ShapeRelation.WITHIN) {
+            return InetAddressRange.newWithinQuery(FIELDNAME, lower, upper);
+        } else if (relation == ShapeRelation.CONTAINS) {
+            return InetAddressRange.newContainsQuery(FIELDNAME, lower, upper);
+        }
+        return InetAddressRange.newIntersectsQuery(FIELDNAME, lower, upper);
+    }
+
+    private Object nextFrom() throws Exception {
         switch (type) {
             case INTEGER:
                 return (int)(random().nextInt() * 0.5 - DISTANCE);
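As an aside for readers following this hunk: below is a minimal, self-contained sketch of the same Lucene IP-range query construction outside the test harness. It assumes the Lucene document/search classes imported above are on the classpath; the class name and field name are illustrative and not part of this commit.

    import java.net.InetAddress;
    import org.apache.lucene.document.InetAddressPoint;
    import org.apache.lucene.document.InetAddressRange;
    import org.apache.lucene.search.Query;

    class IpRangeQuerySketch {
        // Illustrative field name; the test above uses its own FIELDNAME constant.
        static final String FIELD = "field";

        // Exclusive bounds are handled by stepping the endpoints with
        // InetAddressPoint.nextUp/nextDown, exactly as in the diff above.
        static Query intersects(InetAddress from, InetAddress to, boolean includeLower, boolean includeUpper) {
            InetAddress lower = includeLower ? from : InetAddressPoint.nextUp(from);
            InetAddress upper = includeUpper ? to : InetAddressPoint.nextDown(to);
            return InetAddressRange.newIntersectsQuery(FIELD, lower, upper);
        }

        public static void main(String[] args) throws Exception {
            // Mixes an IPv4 and an IPv6 endpoint, as the test data above does.
            System.out.println(intersects(InetAddress.getByName("192.168.1.7"),
                InetAddress.getByName("2001:db8::"), true, false));
        }
    }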
@@ -163,12 +181,14 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
                 return (long)(random().nextLong() * 0.5 - DISTANCE);
             case FLOAT:
                 return (float)(random().nextFloat() * 0.5 - DISTANCE);
+            case IP:
+                return InetAddress.getByName("::ffff:c0a8:107");
             default:
                 return random().nextDouble() * 0.5 - DISTANCE;
         }
     }
 
-    private Object nextTo(Object from) {
+    private Object nextTo(Object from) throws Exception {
         switch (type) {
             case INTEGER:
                 return (Integer)from + DISTANCE;
@@ -178,6 +198,8 @@ public class RangeFieldTypeTests extends FieldTypeTestCase {
                 return (Long)from + DISTANCE;
             case DOUBLE:
                 return (Double)from + DISTANCE;
+            case IP:
+                return InetAddress.getByName("2001:db8::");
             default:
                 return (Float)from + DISTANCE;
         }
@@ -19,16 +19,31 @@
 
 package org.elasticsearch.index.mapper;
 
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocValuesType;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.compress.CompressedXContent;
-import org.elasticsearch.common.xcontent.XContentFactory;
-import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataCache;
+import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
+import org.elasticsearch.index.mapper.MapperService.MergeReason;
+import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.elasticsearch.test.InternalSettingsPlugin;
 
+import java.io.IOException;
+import java.util.Arrays;
 import java.util.Collection;
-
-import static org.hamcrest.Matchers.instanceOf;
+import java.util.Collections;
 
 public class TypeFieldMapperTests extends ESSingleNodeTestCase {
 
@@ -37,13 +52,60 @@ public class TypeFieldMapperTests extends ESSingleNodeTestCase {
         return pluginList(InternalSettingsPlugin.class);
     }
 
-    public void testDocValues() throws Exception {
-        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string();
-
-        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
-        TypeFieldMapper typeMapper = docMapper.metadataMapper(TypeFieldMapper.class);
-        assertTrue(typeMapper.fieldType().hasDocValues());
-        assertThat(typeMapper.fieldType().fielddataBuilder(), instanceOf(DocValuesIndexFieldData.Builder.class));
+    public void testDocValuesMultipleTypes() throws Exception {
+        testDocValues(false);
     }
 
+    public void testDocValuesSingleType() throws Exception {
+        testDocValues(true);
+    }
+
+    public void testDocValues(boolean singleType) throws IOException {
+        Settings indexSettings = Settings.builder()
+                .put("index.mapping.single_type", singleType)
+                .build();
+        MapperService mapperService = createIndex("test", indexSettings).mapperService();
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false);
+        ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON));
+
+        Directory dir = newDirectory();
+        IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
+        w.addDocument(document.rootDoc());
+        DirectoryReader r = DirectoryReader.open(w);
+        w.close();
+
+        MappedFieldType ft = mapperService.fullName(TypeFieldMapper.NAME);
+        IndexOrdinalsFieldData fd = (IndexOrdinalsFieldData) ft.fielddataBuilder().build(mapperService.getIndexSettings(),
+                ft, new IndexFieldDataCache.None(), new NoneCircuitBreakerService(), mapperService);
+        AtomicOrdinalsFieldData afd = fd.load(r.leaves().get(0));
+        SortedSetDocValues values = afd.getOrdinalsValues();
+        assertTrue(values.advanceExact(0));
+        assertEquals(0, values.nextOrd());
+        assertEquals(SortedSetDocValues.NO_MORE_ORDS, values.nextOrd());
+        assertEquals(new BytesRef("type"), values.lookupOrd(0));
+        r.close();
+        dir.close();
+    }
+
+    public void testDefaultsMultipleTypes() throws IOException {
+        Settings indexSettings = Settings.builder()
+                .put("index.mapping.single_type", false)
+                .build();
+        MapperService mapperService = createIndex("test", indexSettings).mapperService();
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false);
+        ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON));
+        IndexableField[] fields = document.rootDoc().getFields(TypeFieldMapper.NAME);
+        assertEquals(IndexOptions.DOCS, fields[0].fieldType().indexOptions());
+        assertEquals(DocValuesType.SORTED_SET, fields[1].fieldType().docValuesType());
+    }
+
+    public void testDefaultsSingleType() throws IOException {
+        Settings indexSettings = Settings.builder()
+                .put("index.mapping.single_type", true)
+                .build();
+        MapperService mapperService = createIndex("test", indexSettings).mapperService();
+        DocumentMapper mapper = mapperService.merge("type", new CompressedXContent("{\"type\":{}}"), MergeReason.MAPPING_UPDATE, false);
+        ParsedDocument document = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), XContentType.JSON));
+        assertEquals(Collections.<IndexableField>emptyList(), Arrays.asList(document.rootDoc().getFields(TypeFieldMapper.NAME)));
+    }
 }
Some files were not shown because too many files have changed in this diff.