Merge branch 'master' into feature/ingest
commit 4759a6e50f

@@ -18,6 +18,8 @@
 */
package org.elasticsearch.gradle.test

import org.gradle.api.GradleException
import org.gradle.api.Project
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input


@@ -31,10 +33,10 @@ class ClusterConfiguration {
int numNodes = 1

@Input
int httpPort = 9400
int baseHttpPort = 9400

@Input
int transportPort = 9500
int baseTransportPort = 9500

@Input
boolean daemonize = true

@@ -45,24 +47,66 @@ class ClusterConfiguration {
@Input
String jvmArgs = System.getProperty('tests.jvm.argline', '')

/**
 * A closure to call before the cluster is considered ready. The closure is passed the node info,
 * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
 * condition is for http on the http port.
 */
@Input
Closure waitCondition = { NodeInfo node, AntBuilder ant ->
    File tmpFile = new File(node.cwd, 'wait.success')
    ant.get(src: "http://localhost:${node.httpPort()}",
            dest: tmpFile.toString(),
            ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
            retries: 10)
    return tmpFile.exists()
}

Map<String, String> systemProperties = new HashMap<>()

LinkedHashMap<String, FileCollection> plugins = new LinkedHashMap<>()
Map<String, String> settings = new HashMap<>()

// map from destination path, to source file
Map<String, Object> extraConfigFiles = new HashMap<>()

LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()

LinkedHashMap<String, Object[]> setupCommands = new LinkedHashMap<>()

@Input
void plugin(String name, FileCollection file) {
    plugins.put(name, file)
}

@Input
void systemProperty(String property, String value) {
    systemProperties.put(property, value)
}

@Input
void setting(String name, String value) {
    settings.put(name, value)
}

@Input
void plugin(String name, FileCollection file) {
    plugins.put(name, file)
}

@Input
void plugin(String name, Project pluginProject) {
    plugins.put(name, pluginProject)
}

@Input
void setupCommand(String name, Object... args) {
    setupCommands.put(name, args)
}

/**
 * Add an extra configuration file. The path is relative to the config dir, and the sourceFile
 * is anything accepted by project.file()
 */
@Input
void extraConfigFile(String path, Object sourceFile) {
    if (path == 'elasticsearch.yml') {
        throw new GradleException('Overwriting elasticsearch.yml is not allowed, add additional settings using cluster { setting "foo", "bar" }')
    }
    extraConfigFiles.put(path, sourceFile)
}
}


@@ -21,8 +21,11 @@ package org.elasticsearch.gradle.test
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.gradle.api.*
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import org.gradle.api.logging.Logger
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec

@@ -34,87 +37,6 @@ import java.nio.file.Paths
 */
class ClusterFormationTasks {

static class NodeInfo {
    /** common configuration for all nodes, including this one */
    ClusterConfiguration config
    /** node number within the cluster, for creating unique names and paths */
    int nodeNum
    /** name of the cluster this node is part of */
    String clusterName
    /** root directory all node files and operations happen under */
    File baseDir
    /** the pid file the node will use */
    File pidFile
    /** elasticsearch home dir */
    File homeDir
    /** working directory for the node process */
    File cwd
    /** file that if it exists, indicates the node failed to start */
    File failedMarker
    /** stdout/stderr log of the elasticsearch process for this node */
    File startLog
    /** directory to install plugins from */
    File pluginsTmpDir
    /** environment variables to start the node with */
    Map<String, String> env
    /** arguments to start the node with */
    List<String> args
    /** Path to the elasticsearch start script */
    String esScript
    /** buffer for ant output when starting this node */
    ByteArrayOutputStream buffer = new ByteArrayOutputStream()

    /** Creates a node to run as part of a cluster for the given task */
    NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task) {
        this.config = config
        this.nodeNum = nodeNum
        clusterName = "${task.path.replace(':', '_').substring(1)}"
        baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
        pidFile = new File(baseDir, 'es.pid')
        homeDir = homeDir(baseDir, config.distribution)
        cwd = new File(baseDir, "cwd")
        failedMarker = new File(cwd, 'run.failed')
        startLog = new File(cwd, 'run.log')
        pluginsTmpDir = new File(baseDir, "plugins tmp")

        env = [
            'JAVA_HOME' : project.javaHome,
            'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
        ]
        args = config.systemProperties.collect { key, value -> "-D${key}=${value}" }
        for (Map.Entry<String, String> property : System.properties.entrySet()) {
            if (property.getKey().startsWith('es.')) {
                args.add("-D${property.getKey()}=${property.getValue()}")
            }
        }
        // running with cmd on windows will look for this with the .bat extension
        esScript = new File(homeDir, 'bin/elasticsearch').toString()
    }

    /** Returns debug string for the command that started this node. */
    String getCommandString() {
        String esCommandString = "Elasticsearch node ${nodeNum} command: ${esScript} "
        esCommandString += args.join(' ')
        esCommandString += '\nenvironment:'
        env.each { k, v -> esCommandString += "\n  ${k}: ${v}" }
        return esCommandString
    }

    /** Returns the directory elasticsearch home is contained in for the given distribution */
    static File homeDir(File baseDir, String distro) {
        String path
        switch (distro) {
            case 'zip':
            case 'tar':
                path = "elasticsearch-${VersionProperties.elasticsearch}"
                break;
            default:
                throw new InvalidUserDataException("Unknown distribution: ${distro}")
        }
        return new File(baseDir, path)
    }
}

/**
 * Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
 */

@@ -179,22 +101,21 @@ class ClusterFormationTasks {
setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node)
setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node)
setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)

// install plugins
for (Map.Entry<String, FileCollection> plugin : node.config.plugins.entrySet()) {
    // replace every dash followed by a character with just the uppercase character
    String camelName = plugin.getKey().replaceAll(/-(\w)/) { _, c -> c.toUpperCase(Locale.ROOT) }
    String actionName = "install${camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1)}Plugin"
    // delay reading the file location until execution time by wrapping in a closure within a GString
    String file = "${-> new File(node.pluginsTmpDir, plugin.getValue().singleFile.getName()).toURI().toURL().toString()}"
    Object[] args = [new File(node.homeDir, 'bin/plugin'), 'install', file]
    setup = configureExecTask(taskName(task, node, actionName), project, setup, node, args)
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
    String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
    setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
}

// extra setup commands
for (Map.Entry<String, Object[]> command : node.config.setupCommands.entrySet()) {
    setup = configureExecTask(taskName(task, node, command.getKey()), project, setup, node, command.getValue())
    // the first argument is the actual script name, relative to home
    Object[] args = command.getValue().clone()
    args[0] = new File(node.homeDir, args[0].toString())
    setup = configureExecTask(taskName(task, node, command.getKey()), project, setup, node, args)
}

Task start = configureStartTask(taskName(task, node, 'start'), project, setup, node)

@@ -236,34 +157,111 @@ class ClusterFormationTasks {
static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node) {
    Map esConfig = [
        'cluster.name'                    : node.clusterName,
        'http.port'                       : node.config.httpPort + node.nodeNum,
        'transport.tcp.port'              : node.config.transportPort + node.nodeNum,
        'http.port'                       : node.httpPort(),
        'transport.tcp.port'              : node.transportPort(),
        'pidfile'                         : node.pidFile,
        'discovery.zen.ping.unicast.hosts': (0..<node.config.numNodes).collect{"127.0.0.1:${node.config.transportPort + it}"}.join(','),
        'discovery.zen.ping.unicast.hosts': (0..<node.config.numNodes).collect{"127.0.0.1:${node.config.baseTransportPort + it}"}.join(','),
        'path.repo'                       : "${node.homeDir}/repo",
        'path.shared_data'                : "${node.homeDir}/../",
        // Define a node attribute so we can test that it exists
        'node.testattr'                   : 'test',
        'repositories.url.allowed_urls'   : 'http://snapshot.test*'
    ]
    esConfig.putAll(node.config.settings)

    return project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) << {
    Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
    writeConfig.doFirst {
        File configFile = new File(node.homeDir, 'config/elasticsearch.yml')
        logger.info("Configuring ${configFile}")
        configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8')
    }
}

/** Adds a task to copy plugins to a temp dir, which they will later be installed from. */
static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) {
    if (node.config.extraConfigFiles.isEmpty()) {
        return setup
    }
    Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup)
    copyConfig.into(new File(node.homeDir, 'config')) // copy must always have a general dest dir, even though we don't use it
    for (Map.Entry<String,Object> extraConfigFile : node.config.extraConfigFiles.entrySet()) {
        File srcConfigFile = project.file(extraConfigFile.getValue())
        if (srcConfigFile.isDirectory()) {
            throw new GradleException("Source for extraConfigFile must be a file: ${srcConfigFile}")
        }
        if (srcConfigFile.exists() == false) {
            throw new GradleException("Source file for extraConfigFile does not exist: ${srcConfigFile}")
        }
        File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
        copyConfig.from(srcConfigFile)
                  .into(destConfigFile.canonicalFile.parentFile)
                  .rename { destConfigFile.name }
    }
    return copyConfig
}

/**
 * Adds a task to copy plugins to a temp dir, which they will later be installed from.
 *
 * For each plugin, if the plugin has rest spec apis in its tests, those api files are also copied
 * to the test resources for this project.
 */
static Task configureCopyPluginsTask(String name, Project project, Task setup, NodeInfo node) {
    if (node.config.plugins.isEmpty()) {
        return setup
    }
    Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)

    return project.tasks.create(name: name, type: Copy, dependsOn: setup) {
        into node.pluginsTmpDir
        from(node.config.plugins.values())
    List<FileCollection> pluginFiles = []
    for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
        FileCollection pluginZip
        if (plugin.getValue() instanceof Project) {
            Project pluginProject = plugin.getValue()
            if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
                throw new GradleException("Task ${name} cannot project ${pluginProject.path} which is not an esplugin")
            }
            String configurationName = "_plugin_${pluginProject.path}"
            Configuration configuration = project.configurations.findByName(configurationName)
            if (configuration == null) {
                configuration = project.configurations.create(configurationName)
            }
            project.dependencies.add(configurationName, pluginProject)
            setup.dependsOn(pluginProject.tasks.bundlePlugin)
            pluginZip = configuration

            // also allow rest tests to use the rest spec from the plugin
            Copy copyRestSpec = null
            for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
                File restApiDir = new File(resourceDir, 'rest-api-spec/api')
                if (restApiDir.exists() == false) continue
                if (copyRestSpec == null) {
                    copyRestSpec = project.tasks.create(name: pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec'), type: Copy)
                    copyPlugins.dependsOn(copyRestSpec)
                    copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
                }
                copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
            }
        } else {
            pluginZip = plugin.getValue()
        }
        pluginFiles.add(pluginZip)
    }

    copyPlugins.into(node.pluginsTmpDir)
    copyPlugins.from(pluginFiles)
    return copyPlugins
}

static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Object plugin) {
    FileCollection pluginZip
    if (plugin instanceof Project) {
        pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
    } else {
        pluginZip = plugin
    }
    // delay reading the file location until execution time by wrapping in a closure within a GString
    String file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
    Object[] args = [new File(node.homeDir, 'bin/plugin'), 'install', file]
    return configureExecTask(name, project, setup, node, args)
}

/** Adds a task to execute a command to help setup the cluster */

@@ -305,7 +303,7 @@ class ClusterFormationTasks {
}

// this closure is converted into ant nodes by groovy's AntBuilder
Closure antRunner = {
Closure antRunner = { AntBuilder ant ->
    // we must add debug options inside the closure so the config is read at execution time, as
    // gradle task options are not processed until the end of the configuration phase
    if (node.config.debug) {

@@ -334,7 +332,7 @@ class ClusterFormationTasks {
    script = wrapperScript.toString()
}

exec(executable: executable, spawn: node.config.daemonize, dir: node.cwd, taskname: 'elasticsearch') {
ant.exec(executable: executable, spawn: node.config.daemonize, dir: node.cwd, taskname: 'elasticsearch') {
    node.env.each { key, value -> env(key: key, value: value) }
    arg(value: script)
    node.args.each { arg(value: it) }

@@ -347,7 +345,6 @@ class ClusterFormationTasks {
node.getCommandString().eachLine { line -> logger.info(line) }

if (logger.isInfoEnabled() || node.config.daemonize == false) {
    // run with piping streams directly out (even stderr to stdout since gradle would capture it)
    runAntCommand(project, antRunner, System.out, System.err)
} else {
    // buffer the output, we may not need to print it

@@ -376,7 +373,7 @@ class ClusterFormationTasks {
resourceexists {
    file(file: node.pidFile.toString())
}
http(url: "http://localhost:${node.config.httpPort + node.nodeNum}")
socket(server: '127.0.0.1', port: node.httpPort())
}
}
}

@@ -386,6 +383,31 @@ class ClusterFormationTasks {
    anyNodeFailed |= node.failedMarker.exists()
}
if (ant.properties.containsKey("failed${name}".toString()) || anyNodeFailed) {
    waitFailed(nodes, logger, 'Failed to start elasticsearch')
}

// go through each node checking the wait condition
for (NodeInfo node : nodes) {
    // first bind node info to the closure, then pass to the ant runner so we can get good logging
    Closure antRunner = node.config.waitCondition.curry(node)

    boolean success
    if (logger.isInfoEnabled()) {
        success = runAntCommand(project, antRunner, System.out, System.err)
    } else {
        PrintStream captureStream = new PrintStream(node.buffer, true, "UTF-8")
        success = runAntCommand(project, antRunner, captureStream, captureStream)
    }

    if (success == false) {
        waitFailed(nodes, logger, 'Elasticsearch cluster failed to pass wait condition')
    }
}
}
return wait
}

static void waitFailed(List<NodeInfo> nodes, Logger logger, String msg) {
    for (NodeInfo node : nodes) {
        if (logger.isInfoEnabled() == false) {
            // We already log the command at info level. No need to do it twice.

@@ -400,10 +422,7 @@ class ClusterFormationTasks {
            node.startLog.eachLine { line -> logger.error(line) }
        }
    }
    throw new GradleException('Failed to start elasticsearch')
}
}
return wait
    throw new GradleException(msg)
}

/** Adds a task to check if the process with the given pidfile is actually elasticsearch */

@@ -474,15 +493,22 @@ class ClusterFormationTasks {
    }
}

static String pluginTaskName(String action, String name, String suffix) {
    // replace every dash followed by a character with just the uppercase character
    String camelName = name.replaceAll(/-(\w)/) { _, c -> c.toUpperCase(Locale.ROOT) }
    return action + camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1) + suffix
}

/** Runs an ant command, sending output to the given out and error streams */
static void runAntCommand(Project project, Closure command, PrintStream outputStream, PrintStream errorStream) {
static Object runAntCommand(Project project, Closure command, PrintStream outputStream, PrintStream errorStream) {
    DefaultLogger listener = new DefaultLogger(
            errorPrintStream: errorStream,
            outputPrintStream: outputStream,
            messageOutputLevel: org.apache.tools.ant.Project.MSG_INFO)

    project.ant.project.addBuildListener(listener)
    project.configure(project.ant, command)
    Object retVal = command(project.ant)
    project.ant.project.removeBuildListener(listener)
    return retVal
}
}


@@ -0,0 +1,131 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.gradle.test

import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
import org.gradle.api.Task

/**
 * A container for the files and configuration associated with a single node in a test cluster.
 */
class NodeInfo {
    /** common configuration for all nodes, including this one */
    ClusterConfiguration config

    /** node number within the cluster, for creating unique names and paths */
    int nodeNum

    /** name of the cluster this node is part of */
    String clusterName

    /** root directory all node files and operations happen under */
    File baseDir

    /** the pid file the node will use */
    File pidFile

    /** elasticsearch home dir */
    File homeDir

    /** working directory for the node process */
    File cwd

    /** file that if it exists, indicates the node failed to start */
    File failedMarker

    /** stdout/stderr log of the elasticsearch process for this node */
    File startLog

    /** directory to install plugins from */
    File pluginsTmpDir

    /** environment variables to start the node with */
    Map<String, String> env

    /** arguments to start the node with */
    List<String> args

    /** Path to the elasticsearch start script */
    String esScript

    /** buffer for ant output when starting this node */
    ByteArrayOutputStream buffer = new ByteArrayOutputStream()

    /** Creates a node to run as part of a cluster for the given task */
    NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task) {
        this.config = config
        this.nodeNum = nodeNum
        clusterName = "${task.path.replace(':', '_').substring(1)}"
        baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
        pidFile = new File(baseDir, 'es.pid')
        homeDir = homeDir(baseDir, config.distribution)
        cwd = new File(baseDir, "cwd")
        failedMarker = new File(cwd, 'run.failed')
        startLog = new File(cwd, 'run.log')
        pluginsTmpDir = new File(baseDir, "plugins tmp")

        env = [
            'JAVA_HOME' : project.javaHome,
            'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
        ]
        args = config.systemProperties.collect { key, value -> "-D${key}=${value}" }
        for (Map.Entry<String, String> property : System.properties.entrySet()) {
            if (property.getKey().startsWith('es.')) {
                args.add("-D${property.getKey()}=${property.getValue()}")
            }
        }
        // running with cmd on windows will look for this with the .bat extension
        esScript = new File(homeDir, 'bin/elasticsearch').toString()
    }

    /** Returns debug string for the command that started this node. */
    String getCommandString() {
        String esCommandString = "Elasticsearch node ${nodeNum} command: ${esScript} "
        esCommandString += args.join(' ')
        esCommandString += '\nenvironment:'
        env.each { k, v -> esCommandString += "\n  ${k}: ${v}" }
        return esCommandString
    }

    /** Returns the http port for this node */
    int httpPort() {
        return config.baseHttpPort + nodeNum
    }

    /** Returns the transport port for this node */
    int transportPort() {
        return config.baseTransportPort + nodeNum
    }

    /** Returns the directory elasticsearch home is contained in for the given distribution */
    static File homeDir(File baseDir, String distro) {
        String path
        switch (distro) {
            case 'zip':
            case 'tar':
                path = "elasticsearch-${VersionProperties.elasticsearch}"
                break;
            default:
                throw new InvalidUserDataException("Unknown distribution: ${distro}")
        }
        return new File(baseDir, path)
    }
}


@@ -67,7 +67,9 @@ class RestIntegTestTask extends RandomizedTestingTask {
}

RestIntegTestTask() {
    project.afterEvaluate {
    // this must run after all projects have been configured, so we know any project
    // references can be accessed as a fully configured
    project.gradle.projectsEvaluated {
        Task test = project.tasks.findByName('test')
        if (test != null) {
            mustRunAfter(test)

@@ -75,7 +77,7 @@ class RestIntegTestTask extends RandomizedTestingTask {
        ClusterFormationTasks.setup(project, this, clusterConfig)
        configure {
            parallelism '1'
            systemProperty 'tests.cluster', "localhost:${clusterConfig.transportPort}"
            systemProperty 'tests.cluster', "localhost:${clusterConfig.baseTransportPort}"
        }
    }
}


@@ -6,7 +6,7 @@ import org.gradle.api.internal.tasks.options.Option

class RunTask extends DefaultTask {

    ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)
    ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false)

    RunTask() {
        project.afterEvaluate {


@@ -51,9 +51,13 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
private static final Map<Class<? extends ElasticsearchException>, ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE;
private final Map<String, List<String>> headers = new HashMap<>();

/**
 * Construct a <code>ElasticsearchException</code> with the specified cause exception.
 */
public ElasticsearchException(Throwable cause) {
    super(cause);
}

/**
 * Construct a <code>ElasticsearchException</code> with the specified detail message.
 *


@@ -266,11 +266,15 @@ public class Version {
public static final int V_2_0_0_ID = 2000099;
public static final Version V_2_0_0 = new Version(V_2_0_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_1_ID = 2000199;
public static final Version V_2_0_1 = new Version(V_2_0_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_2_ID = 2000299;
public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_1_0_ID = 2010099;
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_0);
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_1_ID = 2010199;
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_2_0_ID = 2020099;
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_0);
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final int V_3_0_0_ID = 3000099;
public static final Version V_3_0_0 = new Version(V_3_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final Version CURRENT = V_3_0_0;

@@ -289,8 +293,12 @@ public class Version {
        return V_3_0_0;
    case V_2_2_0_ID:
        return V_2_2_0;
    case V_2_1_1_ID:
        return V_2_1_1;
    case V_2_1_0_ID:
        return V_2_1_0;
    case V_2_0_2_ID:
        return V_2_0_2;
    case V_2_0_1_ID:
        return V_2_0_1;
    case V_2_0_0_ID:


@@ -191,7 +191,7 @@ class JNANatives {
if (logger.isDebugEnabled()) {
    logger.debug("unable to install syscall filter", t);
}
logger.warn("unable to install syscall filter: " + t.getMessage());
logger.warn("unable to install syscall filter: ", t);
}
}
}


@@ -231,7 +231,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
    }
} catch (Exception ex) {
    // Wrap the inner exception so we have the index name in the exception message
    throw new IllegalStateException("unable to upgrade the mappings for the index [" + indexMetaData.getIndex() + "], reason: [" + ex.getMessage() + "]", ex);
    throw new IllegalStateException("unable to upgrade the mappings for the index [" + indexMetaData.getIndex() + "]", ex);
}
}


@@ -20,15 +20,17 @@
package org.elasticsearch.cluster.routing;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.gateway.CorruptStateException;

import java.io.IOException;
import java.util.Objects;

/**
 * Uniquely identifies an allocation. An allocation is a shard moving from unassigned to initializing,

@@ -43,6 +45,30 @@ public class AllocationId implements ToXContent {
private static final String ID_KEY = "id";
private static final String RELOCATION_ID_KEY = "relocation_id";

private static final ObjectParser<AllocationId.Builder, Void> ALLOCATION_ID_PARSER = new ObjectParser<>("allocationId");

static {
    ALLOCATION_ID_PARSER.declareString(AllocationId.Builder::setId, new ParseField(ID_KEY));
    ALLOCATION_ID_PARSER.declareString(AllocationId.Builder::setRelocationId, new ParseField(RELOCATION_ID_KEY));
}

private static class Builder {
    private String id;
    private String relocationId;

    public void setId(String id) {
        this.id = id;
    }

    public void setRelocationId(String relocationId) {
        this.relocationId = relocationId;
    }

    public AllocationId build() {
        return new AllocationId(id, relocationId);
    }
}

private final String id;
@Nullable
private final String relocationId;

@@ -58,6 +84,7 @@ public class AllocationId implements ToXContent {
}

private AllocationId(String id, String relocationId) {
    Objects.requireNonNull(id, "Argument [id] must be non-null");
    this.id = id;
    this.relocationId = relocationId;
}

@@ -164,35 +191,6 @@ public class AllocationId implements ToXContent {
}

public static AllocationId fromXContent(XContentParser parser) throws IOException {
    XContentParser.Token token = parser.currentToken();
    if (token == null) { // fresh parser? move to the first real token under object
        token = parser.nextToken();
    }
    assert token == XContentParser.Token.START_OBJECT;

    String id = null;
    String relocationId = null;

    String currentFieldName = null;

    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token.isValue()) {
            if (ID_KEY.equals(currentFieldName)) {
                id = parser.text();
            } else if (RELOCATION_ID_KEY.equals(currentFieldName)) {
                relocationId = parser.text();
            } else {
                throw new CorruptStateException("unexpected field in allocation id [" + currentFieldName + "]");
            }
        } else {
            throw new CorruptStateException("unexpected token in allocation id [" + token.name() + "]");
        }
    }
    if (id == null) {
        throw new CorruptStateException("missing value for [id] in allocation id");
    }
    return new AllocationId(id, relocationId);
    return ALLOCATION_ID_PARSER.parse(parser, new AllocationId.Builder()).build();
}
}


@@ -643,7 +643,8 @@ public class Base64 {
try {
    encoded = encodeBytes(source, 0, source.length, NO_OPTIONS);
} catch (java.io.IOException ex) {
    assert false : ex.getMessage();
    // not sure why this was an assertion before, running with assertions disabled would mean swallowing this exception
    throw new IllegalStateException(ex);
} // end catch
assert encoded != null;
return encoded;

@@ -705,7 +706,7 @@ public class Base64 {
try {
    encoded = encodeBytes(source, off, len, NO_OPTIONS);
} catch (java.io.IOException ex) {
    assert false : ex.getMessage();
    throw new IllegalStateException(ex);
} // end catch
assert encoded != null;
return encoded;

@@ -766,7 +767,7 @@ public class Base64 {
try {
    encoded = encodeBytesToBytes(source, 0, source.length, Base64.NO_OPTIONS);
} catch (java.io.IOException ex) {
    assert false : "IOExceptions only come from GZipping, which is turned off: " + ex.getMessage();
    throw new IllegalStateException("IOExceptions only come from GZipping, which is turned off: ", ex);
}
return encoded;
}


@@ -21,17 +21,23 @@ package org.elasticsearch.common.geo.builders;

import com.spatial4j.core.shape.Circle;
import com.vividsolutions.jts.geom.Coordinate;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.unit.DistanceUnit.Distance;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Objects;

public class CircleBuilder extends ShapeBuilder {

    public static final String FIELD_RADIUS = "radius";
    public static final GeoShapeType TYPE = GeoShapeType.CIRCLE;

    public static final CircleBuilder PROTOTYPE = new CircleBuilder();

    private DistanceUnit unit;
    private double radius;
    private Coordinate center;

@@ -57,6 +63,13 @@ public class CircleBuilder extends ShapeBuilder {
    return center(new Coordinate(lon, lat));
}

/**
 * Get the center of the circle
 */
public Coordinate center() {
    return center;
}

/**
 * Set the radius of the circle. The String value will be parsed by {@link DistanceUnit}
 * @param radius Value and unit of the circle combined in a string

@@ -97,10 +110,24 @@ public class CircleBuilder extends ShapeBuilder {
    return this;
}

/**
 * Get the radius of the circle without unit
 */
public double radius() {
    return this.radius;
}

/**
 * Get the radius unit of the circle
 */
public DistanceUnit unit() {
    return this.unit;
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject();
    builder.field(FIELD_TYPE, TYPE.shapename);
    builder.field(FIELD_TYPE, TYPE.shapeName());
    builder.field(FIELD_RADIUS, unit.toString(radius));
    builder.field(FIELD_COORDINATES);
    toXContent(builder, center);

@@ -116,4 +143,37 @@ public class CircleBuilder extends ShapeBuilder {
public GeoShapeType type() {
    return TYPE;
}

@Override
public int hashCode() {
    return Objects.hash(center, radius, unit.ordinal());
}

@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    CircleBuilder other = (CircleBuilder) obj;
    return Objects.equals(center, other.center) &&
            Objects.equals(radius, other.radius) &&
            Objects.equals(unit.ordinal(), other.unit.ordinal());
}

@Override
public void writeTo(StreamOutput out) throws IOException {
    writeCoordinateTo(center, out);
    out.writeDouble(radius);
    DistanceUnit.writeDistanceUnit(out, unit);
}

@Override
public CircleBuilder readFrom(StreamInput in) throws IOException {
    return new CircleBuilder()
            .center(readCoordinateFrom(in))
            .radius(in.readDouble(), DistanceUnit.readDistanceUnit(in));
}
}


@@ -21,13 +21,19 @@ package org.elasticsearch.common.geo.builders;

import com.spatial4j.core.shape.Rectangle;
import com.vividsolutions.jts.geom.Coordinate;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Locale;
import java.util.Objects;

public class EnvelopeBuilder extends ShapeBuilder {

    public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE;
    public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder();

    protected Coordinate topLeft;
    protected Coordinate bottomRight;

@@ -61,7 +67,8 @@ public class EnvelopeBuilder extends ShapeBuilder {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject();
    builder.field(FIELD_TYPE, TYPE.shapename);
    builder.field(FIELD_TYPE, TYPE.shapeName());
    builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT));
    builder.startArray(FIELD_COORDINATES);
    toXContent(builder, topLeft);
    toXContent(builder, bottomRight);

@@ -78,4 +85,38 @@ public class EnvelopeBuilder extends ShapeBuilder {
public GeoShapeType type() {
    return TYPE;
}

@Override
public int hashCode() {
    return Objects.hash(orientation, topLeft, bottomRight);
}

@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    EnvelopeBuilder other = (EnvelopeBuilder) obj;
    return Objects.equals(orientation, other.orientation) &&
            Objects.equals(topLeft, other.topLeft) &&
            Objects.equals(bottomRight, other.bottomRight);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
    out.writeBoolean(orientation == Orientation.RIGHT);
    writeCoordinateTo(topLeft, out);
    writeCoordinateTo(bottomRight, out);
}

@Override
public EnvelopeBuilder readFrom(StreamInput in) throws IOException {
    Orientation orientation = in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT;
    return new EnvelopeBuilder(orientation)
            .topLeft(readCoordinateFrom(in))
            .bottomRight(readCoordinateFrom(in));
}
}


@@ -102,7 +102,7 @@ public class GeometryCollectionBuilder extends ShapeBuilder {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject();
    builder.field(FIELD_TYPE, TYPE.shapename);
    builder.field(FIELD_TYPE, TYPE.shapeName());
    builder.startArray(FIELD_GEOMETRIES);
    for (ShapeBuilder shape : shapes) {
        shape.toXContent(builder, params);


@@ -39,7 +39,7 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject();
    builder.field(FIELD_TYPE, TYPE.shapename);
    builder.field(FIELD_TYPE, TYPE.shapeName());
    builder.field(FIELD_COORDINATES);
    coordinatesToXcontent(builder, false);
    builder.endObject();


@@ -57,7 +57,7 @@ public class MultiLineStringBuilder extends ShapeBuilder {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject();
    builder.field(FIELD_TYPE, TYPE.shapename);
    builder.field(FIELD_TYPE, TYPE.shapeName());
    builder.field(FIELD_COORDINATES);
    builder.startArray();
    for(LineStringBuilder line : lines) {


@@ -37,7 +37,7 @@ public class MultiPointBuilder extends PointCollection<MultiPointBuilder> {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject();
    builder.field(FIELD_TYPE, TYPE.shapename);
    builder.field(FIELD_TYPE, TYPE.shapeName());
    builder.field(FIELD_COORDINATES);
    super.coordinatesToXcontent(builder, false);
    builder.endObject();


@@ -51,7 +51,7 @@ public class MultiPolygonBuilder extends ShapeBuilder {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject();
    builder.field(FIELD_TYPE, TYPE.shapename);
    builder.field(FIELD_TYPE, TYPE.shapeName());
    builder.startArray(FIELD_COORDINATES);
    for(PolygonBuilder polygon : polygons) {
        builder.startArray();


@@ -20,7 +20,10 @@
package org.elasticsearch.common.geo.builders;

import java.io.IOException;
import java.util.Objects;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;

import com.spatial4j.core.shape.Point;

@@ -30,6 +33,8 @@ public class PointBuilder extends ShapeBuilder {

public static final GeoShapeType TYPE = GeoShapeType.POINT;

public static final PointBuilder PROTOTYPE = new PointBuilder();

private Coordinate coordinate;

public PointBuilder coordinate(Coordinate coordinate) {

@@ -48,7 +53,7 @@ public class PointBuilder extends ShapeBuilder {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject();
    builder.field(FIELD_TYPE, TYPE.shapename);
    builder.field(FIELD_TYPE, TYPE.shapeName());
    builder.field(FIELD_COORDINATES);
    toXContent(builder, coordinate);
    return builder.endObject();

@@ -63,4 +68,31 @@ public class PointBuilder extends ShapeBuilder {
public GeoShapeType type() {
    return TYPE;
}

@Override
public int hashCode() {
    return Objects.hash(coordinate);
}

@Override
public boolean equals(Object obj) {
    if (this == obj) {
        return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
        return false;
    }
    PointBuilder other = (PointBuilder) obj;
    return Objects.equals(coordinate, other.coordinate);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
    writeCoordinateTo(coordinate, out);
}

@Override
public PointBuilder readFrom(StreamInput in) throws IOException {
    return new PointBuilder().coordinate(readCoordinateFrom(in));
}
}


@@ -172,7 +172,7 @@ public class PolygonBuilder extends ShapeBuilder {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject();
    builder.field(FIELD_TYPE, TYPE.shapename);
    builder.field(FIELD_TYPE, TYPE.shapeName());
    builder.startArray(FIELD_COORDINATES);
    coordinatesArray(builder, params);
    builder.endArray();


@@ -26,8 +26,12 @@ import com.spatial4j.core.shape.jts.JtsGeometry;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.DistanceUnit.Distance;

@@ -43,7 +47,7 @@ import java.util.*;
/**
 * Basic class for building GeoJSON shapes like Polygons, Linestrings, etc
 */
public abstract class ShapeBuilder extends ToXContentToBytes {
public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {

    protected static final ESLogger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());


@@ -173,6 +177,15 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
    return builder.startArray().value(coordinate.x).value(coordinate.y).endArray();
}

protected static void writeCoordinateTo(Coordinate coordinate, StreamOutput out) throws IOException {
    out.writeDouble(coordinate.x);
    out.writeDouble(coordinate.y);
}

protected Coordinate readCoordinateFrom(StreamInput in) throws IOException {
    return new Coordinate(in.readDouble(), in.readDouble());
}

public static Orientation orientationFromString(String orientation) {
    orientation = orientation.toLowerCase(Locale.ROOT);
    switch (orientation) {

@@ -565,12 +578,16 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
ENVELOPE("envelope"),
CIRCLE("circle");

protected final String shapename;
private final String shapename;

private GeoShapeType(String shapename) {
    this.shapename = shapename;
}

protected String shapeName() {
    return shapename;
}

public static GeoShapeType forName(String geoshapename) {
    String typename = geoshapename.toLowerCase(Locale.ROOT);
    for (GeoShapeType type : values()) {

@@ -823,4 +840,20 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
        return geometryCollection;
    }
}

@Override
public String getWriteableName() {
    return type().shapeName();
}

// NORELEASE this should be deleted as soon as all shape builders implement writable
@Override
public void writeTo(StreamOutput out) throws IOException {
}

// NORELEASE this should be deleted as soon as all shape builders implement writable
@Override
public ShapeBuilder readFrom(StreamInput in) throws IOException {
    return null;
}
}


@@ -331,6 +331,6 @@ public abstract class Multibinder<T> {

NullPointerException npe = new NullPointerException(name);
throw new ConfigurationException(singleton(
        new Message(emptyList(), npe.toString(), npe)));
        new Message(emptyList(), npe)));
}
}


@@ -58,6 +58,10 @@ public final class Message implements Serializable, Element {
    this(Collections.singletonList(source), message, null);
}

public Message(Object source, Throwable cause) {
    this(Collections.singletonList(source), null, cause);
}

public Message(String message) {
    this(Collections.emptyList(), message, null);
}


@@ -525,7 +525,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
    }
});
} else if (node.equals(nodes().masterNode())) {
    handleMasterGone(node, "shut_down");
    handleMasterGone(node, null, "shut_down");
}
}


@@ -615,7 +615,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
    });
}

private void handleMasterGone(final DiscoveryNode masterNode, final String reason) {
private void handleMasterGone(final DiscoveryNode masterNode, final Throwable cause, final String reason) {
    if (lifecycleState() != Lifecycle.State.STARTED) {
        // not started, ignore a master failure
        return;

@@ -625,7 +625,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
        return;
    }

    logger.info("master_left [{}], reason [{}]", masterNode, reason);
    logger.info("master_left [{}], reason [{}]", cause, masterNode, reason);

    clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", Priority.IMMEDIATE, new ClusterStateUpdateTask() {


@@ -1078,8 +1078,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
private class MasterNodeFailureListener implements MasterFaultDetection.Listener {

    @Override
    public void onMasterFailure(DiscoveryNode masterNode, String reason) {
        handleMasterGone(masterNode, reason);
    public void onMasterFailure(DiscoveryNode masterNode, Throwable cause, String reason) {
        handleMasterGone(masterNode, cause, reason);
    }
}


@@ -49,7 +49,7 @@ public class MasterFaultDetection extends FaultDetection {
public static interface Listener {

    /** called when pinging the master failed, like a timeout, transport disconnects etc */
    void onMasterFailure(DiscoveryNode masterNode, String reason);
    void onMasterFailure(DiscoveryNode masterNode, Throwable cause, String reason);

}


@@ -117,7 +117,7 @@ public class MasterFaultDetection extends FaultDetection {
    transportService.connectToNode(masterNode);
} catch (final Exception e) {
    // notify master failure (which stops also) and bail..
    notifyMasterFailure(masterNode, "failed to perform initial connect [" + e.getMessage() + "]");
    notifyMasterFailure(masterNode, e, "failed to perform initial connect ");
    return;
}
if (masterPinger != null) {

@@ -176,22 +176,22 @@ public class MasterFaultDetection extends FaultDetection {
        threadPool.schedule(TimeValue.timeValueMillis(0), ThreadPool.Names.SAME, masterPinger);
    } catch (Exception e) {
        logger.trace("[master] [{}] transport disconnected (with verified connect)", masterNode);
        notifyMasterFailure(masterNode, "transport disconnected (with verified connect)");
        notifyMasterFailure(masterNode, null, "transport disconnected (with verified connect)");
    }
} else {
    logger.trace("[master] [{}] transport disconnected", node);
    notifyMasterFailure(node, "transport disconnected");
    notifyMasterFailure(node, null, "transport disconnected");
}
}
}

private void notifyMasterFailure(final DiscoveryNode masterNode, final String reason) {
private void notifyMasterFailure(final DiscoveryNode masterNode, final Throwable cause, final String reason) {
    if (notifiedMasterFailure.compareAndSet(false, true)) {
        threadPool.generic().execute(new Runnable() {
            @Override
            public void run() {
                for (Listener listener : listeners) {
                    listener.onMasterFailure(masterNode, reason);
                    listener.onMasterFailure(masterNode, cause, reason);
                }
            }
        });

@@ -255,15 +255,15 @@ public class MasterFaultDetection extends FaultDetection {
    return;
} else if (exp.getCause() instanceof NotMasterException) {
    logger.debug("[master] pinging a master {} that is no longer a master", masterNode);
    notifyMasterFailure(masterToPing, "no longer master");
    notifyMasterFailure(masterToPing, exp, "no longer master");
    return;
} else if (exp.getCause() instanceof ThisIsNotTheMasterYouAreLookingForException) {
    logger.debug("[master] pinging a master {} that is not the master", masterNode);
    notifyMasterFailure(masterToPing, "not master");
    notifyMasterFailure(masterToPing, exp,"not master");
    return;
} else if (exp.getCause() instanceof NodeDoesNotExistOnMasterException) {
    logger.debug("[master] pinging a master {} but we do not exists on it, act as if its master failure", masterNode);
    notifyMasterFailure(masterToPing, "do not exists on master, act as master failure");
    notifyMasterFailure(masterToPing, exp,"do not exists on master, act as master failure");
    return;
}


@@ -272,7 +272,7 @@ public class MasterFaultDetection extends FaultDetection {
if (retryCount >= pingRetryCount) {
    logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", masterNode, pingRetryCount, pingRetryTimeout);
    // not good, failure
    notifyMasterFailure(masterToPing, "failed to ping, tried [" + pingRetryCount + "] times, each with maximum [" + pingRetryTimeout + "] timeout");
    notifyMasterFailure(masterToPing, null, "failed to ping, tried [" + pingRetryCount + "] times, each with  maximum [" + pingRetryTimeout + "] timeout");
} else {
    // resend the request, not reschedule, rely on send timeout
    transportService.sendRequest(masterToPing, MASTER_PING_ACTION_NAME, request, options, this);


@@ -140,9 +140,9 @@ public class PublishClusterStateAction extends AbstractComponent {
    throw t;
} catch (Throwable t) {
    // try to fail committing, in cause it's still on going
    if (sendingController.markAsFailed("unexpected error [" + t.getMessage() + "]")) {
    if (sendingController.markAsFailed("unexpected error", t)) {
        // signal the change should be rejected
        throw new Discovery.FailedToCommitClusterStateException("unexpected error [{}]", t, t.getMessage());
        throw new Discovery.FailedToCommitClusterStateException("unexpected error", t);
    } else {
        throw t;
    }
@ -583,6 +583,21 @@ public class PublishClusterStateAction extends AbstractComponent {
|
|||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* tries marking the publishing as failed, if a decision wasn't made yet
|
||||
*
|
||||
* @return true if the publishing was failed and the cluster state is *not* committed
|
||||
**/
|
||||
synchronized private boolean markAsFailed(String details, Throwable reason) {
|
||||
if (committedOrFailed()) {
|
||||
return committed == false;
|
||||
}
|
||||
logger.trace("failed to commit version [{}]. {}", reason, clusterState.version(), details);
|
||||
committed = false;
|
||||
committedOrFailedLatch.countDown();
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* tries marking the publishing as failed, if a decision wasn't made yet
|
||||
*
|
||||
|
|
|
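markAsFailed and its markAsCommitted counterpart race on the same committed flag and latch: the first decision wins, and later calls only report what was already decided. A stripped-down sketch of that commit-or-fail handshake, with the committing side assumed, since only the failure path appears in this hunk.

import java.util.concurrent.CountDownLatch;

// Simplified stand-in for the sending controller's decision state; the real class
// also carries the cluster state, listeners and the logging shown above.
class CommitOrFail {
    private final CountDownLatch committedOrFailedLatch = new CountDownLatch(1);
    private Boolean committed; // null means no decision has been made yet

    synchronized boolean markAsFailed(String details, Throwable reason) {
        if (committed != null) {
            return committed == false; // only report failure if the earlier decision was a failure
        }
        committed = false;
        committedOrFailedLatch.countDown();
        return true;
    }

    synchronized boolean markAsCommitted() {
        if (committed != null) {
            return committed;
        }
        committed = true;
        committedOrFailedLatch.countDown();
        return true;
    }

    void awaitDecision() throws InterruptedException {
        committedOrFailedLatch.await();
    }
}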
@ -235,7 +235,7 @@ public class Analysis {
|
|||
try (BufferedReader reader = FileSystemUtils.newBufferedReader(wordListFile.toUri().toURL(), StandardCharsets.UTF_8)) {
|
||||
return loadWordList(reader, "#");
|
||||
} catch (IOException ioe) {
|
||||
String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, ioe.getMessage());
|
||||
String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix);
|
||||
throw new IllegalArgumentException(message, ioe);
|
||||
}
|
||||
}
|
||||
|
@ -282,7 +282,7 @@ public class Analysis {
|
|||
try {
|
||||
return FileSystemUtils.newBufferedReader(path.toUri().toURL(), StandardCharsets.UTF_8);
|
||||
} catch (IOException ioe) {
|
||||
String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, ioe.getMessage());
|
||||
String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix);
|
||||
throw new IllegalArgumentException(message, ioe);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -67,7 +67,7 @@ public class TranslogRecoveryPerformer {
|
|||
numOps++;
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
throw new BatchOperationException(shardId, "failed to apply batch translog operation [" + t.getMessage() + "]", numOps, t);
|
||||
throw new BatchOperationException(shardId, "failed to apply batch translog operation", numOps, t);
|
||||
}
|
||||
return numOps;
|
||||
}
|
||||
|
|
|
@ -33,6 +33,11 @@ public class IndexShardSnapshotException extends ElasticsearchException {
|
|||
this(shardId, msg, null);
|
||||
}
|
||||
|
||||
public IndexShardSnapshotException(ShardId shardId, Throwable cause) {
|
||||
super(cause);
|
||||
setShard(shardId);
|
||||
}
|
||||
|
||||
public IndexShardSnapshotException(ShardId shardId, String msg, Throwable cause) {
|
||||
super(msg, cause);
|
||||
setShard(shardId);
|
||||
|
|
|
@ -32,6 +32,10 @@ public class IndexShardSnapshotFailedException extends IndexShardSnapshotExcepti
|
|||
super(shardId, msg);
|
||||
}
|
||||
|
||||
public IndexShardSnapshotFailedException(ShardId shardId, Throwable cause) {
|
||||
super(shardId, cause);
|
||||
}
|
||||
|
||||
public IndexShardSnapshotFailedException(ShardId shardId, String msg, Throwable cause) {
|
||||
super(shardId, msg, cause);
|
||||
}
|
||||
|
|
|
@ -191,7 +191,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
|
|||
if (e instanceof IndexShardSnapshotFailedException) {
|
||||
throw (IndexShardSnapshotFailedException) e;
|
||||
} else {
|
||||
throw new IndexShardSnapshotFailedException(shardId, e.getMessage(), e);
|
||||
throw new IndexShardSnapshotFailedException(shardId, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -373,7 +373,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
|
|||
} catch (IOException e) {
|
||||
// We cannot delete index file - this is fatal, we cannot continue, otherwise we might end up
|
||||
// with references to non-existing files
|
||||
throw new IndexShardSnapshotFailedException(shardId, "error deleting index files during cleanup, reason: " + e.getMessage(), e);
|
||||
throw new IndexShardSnapshotFailedException(shardId, "error deleting index files during cleanup", e);
|
||||
}
|
||||
|
||||
blobsToDelete = new ArrayList<>();
|
||||
|
|
|
@ -22,6 +22,7 @@ package org.elasticsearch.indices.recovery;
|
|||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
|
@ -175,7 +176,7 @@ public class RecoveryStatus extends AbstractRefCounted {
|
|||
listener.onRecoveryFailure(state(), e, sendShardFailure);
|
||||
} finally {
|
||||
try {
|
||||
cancellableThreads.cancel("failed recovery [" + e.getMessage() + "]");
|
||||
cancellableThreads.cancel("failed recovery [" + ExceptionsHelper.stackTrace(e) + "]");
|
||||
} finally {
|
||||
// release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now
|
||||
decRef();
|
||||
|
|
|
@ -130,8 +130,17 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
|
|||
|
||||
}
|
||||
|
||||
protected void retryRecovery(final RecoveryStatus recoveryStatus, final Throwable reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
|
||||
logger.trace("will retry recovery with id [{}] in [{}]", reason, recoveryStatus.recoveryId(), retryAfter);
|
||||
retryRecovery(recoveryStatus, retryAfter, currentRequest);
|
||||
}
|
||||
|
||||
protected void retryRecovery(final RecoveryStatus recoveryStatus, final String reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
|
||||
logger.trace("will retrying recovery with id [{}] in [{}] (reason [{}])", recoveryStatus.recoveryId(), retryAfter, reason);
|
||||
logger.trace("will retry recovery with id [{}] in [{}] (reason [{}])", recoveryStatus.recoveryId(), retryAfter, reason);
|
||||
retryRecovery(recoveryStatus, retryAfter, currentRequest);
|
||||
}
|
||||
|
||||
private void retryRecovery(final RecoveryStatus recoveryStatus, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
|
||||
try {
|
||||
recoveryStatus.resetRecovery();
|
||||
} catch (Throwable e) {
|
||||
|
@ -224,7 +233,7 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
|
|||
}
|
||||
|
||||
if (cause instanceof DelayRecoveryException) {
|
||||
retryRecovery(recoveryStatus, cause.getMessage(), recoverySettings.retryDelayStateSync(), request);
|
||||
retryRecovery(recoveryStatus, cause, recoverySettings.retryDelayStateSync(), request);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -69,7 +69,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
|
|||
// create a new IndexWriter
|
||||
logger.info("recovery failed for primary shadow shard, failing shard");
|
||||
// pass the failure as null, as we want to ensure the store is not marked as corrupted
|
||||
shard.failShard("primary relocation failed on shared filesystem caused by: [" + t.getMessage() + "]", null);
|
||||
shard.failShard("primary relocation failed on shared filesystem", t);
|
||||
} else {
|
||||
logger.info("recovery failed on shared filesystem", t);
|
||||
}
|
||||
|
|
|
@ -173,7 +173,7 @@ public abstract class InternalSignificantTerms<A extends InternalSignificantTerm
|
|||
for (InternalAggregation aggregation : aggregations) {
|
||||
InternalSignificantTerms<A, B> terms = (InternalSignificantTerms<A, B>) aggregation;
|
||||
for (Bucket bucket : terms.buckets) {
|
||||
List<Bucket> existingBuckets = buckets.get(bucket.getKey());
|
||||
List<Bucket> existingBuckets = buckets.get(bucket.getKeyAsString());
|
||||
if (existingBuckets == null) {
|
||||
existingBuckets = new ArrayList<>(aggregations.size());
|
||||
buckets.put(bucket.getKeyAsString(), existingBuckets);
|
||||
|
|
|
@ -484,7 +484,7 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
|
|||
}
|
||||
|
||||
protected void traceResponseSent(long requestId, String action, Throwable t) {
|
||||
tracerLog.trace("[{}][{}] sent error response (error: [{}])", requestId, action, t.getMessage());
|
||||
tracerLog.trace("[{}][{}] sent error response", t, requestId, action);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -24,6 +24,10 @@ import org.apache.lucene.util.LuceneTestCase;
|
|||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.segments.IndexSegments;
|
||||
import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
|
||||
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
|
||||
import org.elasticsearch.action.admin.indices.segments.ShardSegments;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT;
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.action.search.SearchRequestBuilder;
|
||||
|
@ -38,6 +42,7 @@ import org.elasticsearch.common.util.MultiDataPathUpgrader;
|
|||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.engine.EngineConfig;
|
||||
import org.elasticsearch.index.engine.Segment;
|
||||
import org.elasticsearch.index.mapper.string.StringFieldMapperPositionIncrementGapTests;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.index.shard.MergePolicyConfig;
|
||||
|
@ -319,7 +324,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
|||
Version version = extractVersion(index);
|
||||
String indexName = loadIndex(index);
|
||||
importIndex(indexName);
|
||||
assertIndexSanity(indexName);
|
||||
assertIndexSanity(indexName, version);
|
||||
assertBasicSearchWorks(indexName);
|
||||
assertBasicAggregationWorks(indexName);
|
||||
assertRealtimeGetWorks(indexName);
|
||||
|
@ -339,11 +344,22 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
|||
version.luceneVersion.minor == Version.CURRENT.luceneVersion.minor;
|
||||
}
|
||||
|
||||
void assertIndexSanity(String indexName) {
|
||||
void assertIndexSanity(String indexName, Version indexCreated) {
|
||||
GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices(indexName).get();
|
||||
assertEquals(1, getIndexResponse.indices().length);
|
||||
assertEquals(indexName, getIndexResponse.indices()[0]);
|
||||
Version actualVersionCreated = Version.indexCreated(getIndexResponse.getSettings().get(indexName));
|
||||
assertEquals(indexCreated, actualVersionCreated);
|
||||
ensureYellow(indexName);
|
||||
IndicesSegmentResponse segmentsResponse = client().admin().indices().prepareSegments(indexName).get();
|
||||
IndexSegments segments = segmentsResponse.getIndices().get(indexName);
|
||||
for (IndexShardSegments indexShardSegments : segments) {
|
||||
for (ShardSegments shardSegments : indexShardSegments) {
|
||||
for (Segment segment : shardSegments) {
|
||||
assertEquals(indexCreated.luceneVersion, segment.version);
|
||||
}
|
||||
}
|
||||
}
|
||||
SearchResponse test = client().prepareSearch(indexName).get();
|
||||
assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1l));
|
||||
}
|
||||
|
|
|
@ -0,0 +1,144 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.xcontent.*;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.hamcrest.Matchers.*;
|
||||
|
||||
public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> extends ESTestCase {
|
||||
|
||||
private static final int NUMBER_OF_TESTBUILDERS = 20;
|
||||
private static NamedWriteableRegistry namedWriteableRegistry;
|
||||
|
||||
/**
|
||||
* setup for the whole base test class
|
||||
*/
|
||||
@BeforeClass
|
||||
public static void init() {
|
||||
if (namedWriteableRegistry == null) {
|
||||
namedWriteableRegistry = new NamedWriteableRegistry();
|
||||
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
|
||||
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE);
|
||||
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE);
|
||||
}
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void afterClass() throws Exception {
|
||||
namedWriteableRegistry = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* create random shape that is put under test
|
||||
*/
|
||||
protected abstract SB createTestShapeBuilder();
|
||||
|
||||
/**
|
||||
* mutate the given shape so the returned shape is different
|
||||
*/
|
||||
protected abstract SB mutate(SB original) throws IOException;
|
||||
|
||||
/**
|
||||
* Test that creates new shape from a random test shape and checks both for equality
|
||||
*/
|
||||
public void testFromXContent() throws IOException {
|
||||
for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
|
||||
SB testShape = createTestShapeBuilder();
|
||||
XContentBuilder contentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
|
||||
if (randomBoolean()) {
|
||||
contentBuilder.prettyPrint();
|
||||
}
|
||||
XContentBuilder builder = testShape.toXContent(contentBuilder, ToXContent.EMPTY_PARAMS);
|
||||
XContentParser shapeParser = XContentHelper.createParser(builder.bytes());
|
||||
XContentHelper.createParser(builder.bytes());
|
||||
shapeParser.nextToken();
|
||||
ShapeBuilder parsedShape = ShapeBuilder.parse(shapeParser);
|
||||
assertNotSame(testShape, parsedShape);
|
||||
assertEquals(testShape, parsedShape);
|
||||
assertEquals(testShape.hashCode(), parsedShape.hashCode());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test serialization and deserialization of the test shape.
|
||||
*/
|
||||
public void testSerialization() throws IOException {
|
||||
for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
|
||||
SB testShape = createTestShapeBuilder();
|
||||
SB deserializedShape = copyShape(testShape);
|
||||
assertEquals(deserializedShape, testShape);
|
||||
assertEquals(deserializedShape.hashCode(), testShape.hashCode());
|
||||
assertNotSame(deserializedShape, testShape);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test equality and hashCode properties
|
||||
*/
|
||||
public void testEqualsAndHashcode() throws IOException {
|
||||
for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
|
||||
SB firstShape = createTestShapeBuilder();
|
||||
assertFalse("shape is equal to null", firstShape.equals(null));
|
||||
assertFalse("shape is equal to incompatible type", firstShape.equals(""));
|
||||
assertTrue("shape is not equal to self", firstShape.equals(firstShape));
|
||||
assertThat("same shape's hashcode returns different values if called multiple times", firstShape.hashCode(),
|
||||
equalTo(firstShape.hashCode()));
|
||||
assertThat("different shapes should not be equal", mutate(firstShape), not(equalTo(firstShape)));
|
||||
assertThat("different shapes should have different hashcode", mutate(firstShape).hashCode(), not(equalTo(firstShape.hashCode())));
|
||||
|
||||
SB secondShape = copyShape(firstShape);
|
||||
assertTrue("shape is not equal to self", secondShape.equals(secondShape));
|
||||
assertTrue("shape is not equal to its copy", firstShape.equals(secondShape));
|
||||
assertTrue("equals is not symmetric", secondShape.equals(firstShape));
|
||||
assertThat("shape copy's hashcode is different from original hashcode", secondShape.hashCode(), equalTo(firstShape.hashCode()));
|
||||
|
||||
SB thirdShape = copyShape(secondShape);
|
||||
assertTrue("shape is not equal to self", thirdShape.equals(thirdShape));
|
||||
assertTrue("shape is not equal to its copy", secondShape.equals(thirdShape));
|
||||
assertThat("shape copy's hashcode is different from original hashcode", secondShape.hashCode(), equalTo(thirdShape.hashCode()));
|
||||
assertTrue("equals is not transitive", firstShape.equals(thirdShape));
|
||||
assertThat("shape copy's hashcode is different from original hashcode", firstShape.hashCode(), equalTo(thirdShape.hashCode()));
|
||||
assertTrue("equals is not symmetric", thirdShape.equals(secondShape));
|
||||
assertTrue("equals is not symmetric", thirdShape.equals(firstShape));
|
||||
}
|
||||
}
|
||||
|
||||
protected SB copyShape(SB original) throws IOException {
|
||||
try (BytesStreamOutput output = new BytesStreamOutput()) {
|
||||
original.writeTo(output);
|
||||
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
|
||||
ShapeBuilder prototype = (ShapeBuilder) namedWriteableRegistry.getPrototype(ShapeBuilder.class, original.getWriteableName());
|
||||
@SuppressWarnings("unchecked")
|
||||
SB copy = (SB) prototype.readFrom(in);
|
||||
return copy;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
|
||||
import org.elasticsearch.common.unit.DistanceUnit;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class CircleBuilderTests extends AbstractShapeBuilderTestCase<CircleBuilder> {
|
||||
|
||||
@Override
|
||||
protected CircleBuilder createTestShapeBuilder() {
|
||||
double centerX = randomDoubleBetween(-180, 180, false);
|
||||
double centerY = randomDoubleBetween(-90, 90, false);
|
||||
return new CircleBuilder()
|
||||
.center(new Coordinate(centerX, centerY))
|
||||
.radius(randomDoubleBetween(0.1, 10.0, false), randomFrom(DistanceUnit.values()));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected CircleBuilder mutate(CircleBuilder original) throws IOException {
|
||||
CircleBuilder mutation = copyShape(original);
|
||||
double radius = original.radius();
|
||||
DistanceUnit unit = original.unit();
|
||||
|
||||
if (randomBoolean()) {
|
||||
mutation.center(new Coordinate(original.center().x/2, original.center().y/2));
|
||||
} else if (randomBoolean()) {
|
||||
radius = radius/2;
|
||||
} else {
|
||||
DistanceUnit newRandom = unit;
|
||||
while (newRandom == unit) {
|
||||
newRandom = randomFrom(DistanceUnit.values());
|
||||
}
|
||||
unit = newRandom;
|
||||
}
|
||||
return mutation.radius(radius, unit);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.spatial4j.core.shape.Rectangle;
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
|
||||
import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation;
|
||||
import org.elasticsearch.test.geo.RandomShapeGenerator;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class EnvelopeBuilderTests extends AbstractShapeBuilderTestCase<EnvelopeBuilder> {
|
||||
|
||||
@Override
|
||||
protected EnvelopeBuilder createTestShapeBuilder() {
|
||||
EnvelopeBuilder envelope = new EnvelopeBuilder(randomFrom(Orientation.values()));
|
||||
Rectangle box = RandomShapeGenerator.xRandomRectangle(getRandom(), RandomShapeGenerator.xRandomPoint(getRandom()));
|
||||
envelope.topLeft(box.getMinX(), box.getMaxY())
|
||||
.bottomRight(box.getMaxX(), box.getMinY());
|
||||
return envelope;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected EnvelopeBuilder mutate(EnvelopeBuilder original) throws IOException {
|
||||
EnvelopeBuilder mutation = copyShape(original);
|
||||
if (randomBoolean()) {
|
||||
// toggle orientation
|
||||
mutation.orientation = (original.orientation == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT);
|
||||
} else {
|
||||
// move one corner to the middle of original
|
||||
switch (randomIntBetween(0, 3)) {
|
||||
case 0:
|
||||
mutation.topLeft(new Coordinate(randomDoubleBetween(-180.0, original.bottomRight.x, true), original.topLeft.y));
|
||||
break;
|
||||
case 1:
|
||||
mutation.topLeft(new Coordinate(original.topLeft.x, randomDoubleBetween(original.bottomRight.y, 90.0, true)));
|
||||
break;
|
||||
case 2:
|
||||
mutation.bottomRight(new Coordinate(randomDoubleBetween(original.topLeft.x, 180.0, true), original.bottomRight.y));
|
||||
break;
|
||||
case 3:
|
||||
mutation.bottomRight(new Coordinate(original.bottomRight.x, randomDoubleBetween(-90.0, original.topLeft.y, true)));
|
||||
break;
|
||||
}
|
||||
}
|
||||
return mutation;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.geo.builders;
|
||||
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
|
||||
import org.elasticsearch.test.geo.RandomShapeGenerator;
|
||||
import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType;
|
||||
|
||||
public class PointBuilderTests extends AbstractShapeBuilderTestCase<PointBuilder> {
|
||||
|
||||
@Override
|
||||
protected PointBuilder createTestShapeBuilder() {
|
||||
return (PointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POINT);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected PointBuilder mutate(PointBuilder original) {
|
||||
return new PointBuilder().coordinate(new Coordinate(original.longitude()/2, original.latitude()/2));
|
||||
}
|
||||
}
|
|
@ -57,4 +57,20 @@ public class DistanceUnitTests extends ESTestCase {
|
|||
assertThat("Value can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.Distance.parseDistance(unit.toString(testValue)).value, equalTo(testValue));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* This test ensures that we are aware of accidental reordering in the distance unit ordinals,
|
||||
* since equality in e.g. CircleShapeBuilder, hashCode and serialization rely on them
|
||||
*/
|
||||
public void testDistanceUnitNames() {
|
||||
assertEquals(0, DistanceUnit.INCH.ordinal());
|
||||
assertEquals(1, DistanceUnit.YARD.ordinal());
|
||||
assertEquals(2, DistanceUnit.FEET.ordinal());
|
||||
assertEquals(3, DistanceUnit.KILOMETERS.ordinal());
|
||||
assertEquals(4, DistanceUnit.NAUTICALMILES.ordinal());
|
||||
assertEquals(5, DistanceUnit.MILLIMETERS.ordinal());
|
||||
assertEquals(6, DistanceUnit.CENTIMETERS.ordinal());
|
||||
assertEquals(7, DistanceUnit.MILES.ordinal());
|
||||
assertEquals(8, DistanceUnit.METERS.ordinal());
|
||||
}
|
||||
}
|
||||
|
|
|
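The ordinal assertions above matter because the unit is carried over the wire as its ordinal, so reordering the enum would silently change which unit a remote node reads back. A simplified sketch of that round trip follows; the actual read/write helpers live on DistanceUnit itself and may use a different primitive width.

import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.DistanceUnit;

// Sketch only: writes the ordinal out and maps it back to a constant on read.
final class DistanceUnitWire {
    static void write(StreamOutput out, DistanceUnit unit) throws IOException {
        out.writeVInt(unit.ordinal());
    }

    static DistanceUnit read(StreamInput in) throws IOException {
        return DistanceUnit.values()[in.readVInt()];
    }
}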
@ -193,7 +193,7 @@ public class ZenFaultDetectionTests extends ESTestCase {
|
|||
masterFD.addListener(new MasterFaultDetection.Listener() {
|
||||
|
||||
@Override
|
||||
public void onMasterFailure(DiscoveryNode masterNode, String reason) {
|
||||
public void onMasterFailure(DiscoveryNode masterNode, Throwable cause, String reason) {
|
||||
failureNode[0] = masterNode;
|
||||
failureReason[0] = reason;
|
||||
notified.countDown();
|
||||
|
|
|
@ -48,24 +48,16 @@ public class SignificantTermsBackwardCompatibilityIT extends ESBackcompatTestCas
|
|||
static final String CLASS_FIELD = "class";
|
||||
|
||||
/**
|
||||
* Simple upgrade test for streaming significant terms buckets
|
||||
* Test for streaming significant terms buckets to old es versions.
|
||||
*/
|
||||
public void testBucketStreaming() throws IOException, ExecutionException, InterruptedException {
|
||||
logger.debug("testBucketStreaming: indexing documents");
|
||||
String type = randomBoolean() ? "string" : "long";
|
||||
String settings = "{\"index.number_of_shards\": 5, \"index.number_of_replicas\": 0}";
|
||||
index01Docs(type, settings);
|
||||
|
||||
logClusterState();
|
||||
boolean upgraded;
|
||||
int upgradedNodesCounter = 1;
|
||||
do {
|
||||
logger.debug("testBucketStreaming: upgrading {}st node", upgradedNodesCounter++);
|
||||
upgraded = backwardsCluster().upgradeOneNode();
|
||||
ensureGreen();
|
||||
logClusterState();
|
||||
checkSignificantTermsAggregationCorrect();
|
||||
} while (upgraded);
|
||||
logger.debug("testBucketStreaming: done testing significant terms while upgrading");
|
||||
}
|
||||
|
||||
|
@ -101,7 +93,7 @@ public class SignificantTermsBackwardCompatibilityIT extends ESBackcompatTestCas
|
|||
.execute()
|
||||
.actionGet();
|
||||
assertSearchResponse(response);
|
||||
StringTerms classes = (StringTerms) response.getAggregations().get("class");
|
||||
StringTerms classes = response.getAggregations().get("class");
|
||||
assertThat(classes.getBuckets().size(), equalTo(2));
|
||||
for (Terms.Bucket classBucket : classes.getBuckets()) {
|
||||
Map<String, Aggregation> aggs = classBucket.getAggregations().asMap();
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
|
|||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregation;
|
||||
import org.elasticsearch.search.aggregations.InternalAggregations;
|
||||
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare;
|
||||
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND;
|
||||
|
@ -38,6 +39,8 @@ import org.elasticsearch.search.aggregations.bucket.significant.heuristics.Signi
|
|||
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicBuilder;
|
||||
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser;
|
||||
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParserMapper;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.TestSearchContext;
|
||||
|
@ -45,18 +48,11 @@ import org.elasticsearch.test.TestSearchContext;
|
|||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.*;
|
||||
|
||||
import static org.elasticsearch.test.VersionUtils.randomVersion;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
import static org.hamcrest.Matchers.lessThan;
|
||||
import static org.hamcrest.Matchers.lessThanOrEqualTo;
|
||||
import static org.hamcrest.Matchers.*;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -83,24 +79,28 @@ public class SignificanceHeuristicTests extends ESTestCase {
|
|||
ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
|
||||
OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
|
||||
out.setVersion(version);
|
||||
|
||||
sigTerms[0].writeTo(out);
|
||||
|
||||
// read
|
||||
ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
|
||||
InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
|
||||
in.setVersion(version);
|
||||
|
||||
sigTerms[1].readFrom(in);
|
||||
|
||||
assertTrue(sigTerms[1].significanceHeuristic.equals(sigTerms[0].significanceHeuristic));
|
||||
InternalSignificantTerms.Bucket originalBucket = (InternalSignificantTerms.Bucket) sigTerms[0].buckets.get(0);
|
||||
InternalSignificantTerms.Bucket streamedBucket = (InternalSignificantTerms.Bucket) sigTerms[1].buckets.get(0);
|
||||
assertThat(originalBucket.getKeyAsString(), equalTo(streamedBucket.getKeyAsString()));
|
||||
assertThat(originalBucket.getSupersetDf(), equalTo(streamedBucket.getSupersetDf()));
|
||||
assertThat(originalBucket.getSubsetDf(), equalTo(streamedBucket.getSubsetDf()));
|
||||
assertThat(streamedBucket.getSubsetSize(), equalTo(10l));
|
||||
assertThat(streamedBucket.getSupersetSize(), equalTo(20l));
|
||||
}
|
||||
|
||||
InternalSignificantTerms[] getRandomSignificantTerms(SignificanceHeuristic heuristic) {
|
||||
InternalSignificantTerms[] sTerms = new InternalSignificantTerms[2];
|
||||
ArrayList<InternalSignificantTerms.Bucket> buckets = new ArrayList<>();
|
||||
if (randomBoolean()) {
|
||||
BytesRef term = new BytesRef("123.0");
|
||||
buckets.add(new SignificantLongTerms.Bucket(1, 2, 3, 4, 123, InternalAggregations.EMPTY, null));
|
||||
sTerms[0] = new SignificantLongTerms(10, 20, "some_name", null, 1, 1, heuristic, buckets,
|
||||
Collections.EMPTY_LIST, null);
|
||||
|
@ -125,6 +125,56 @@ public class SignificanceHeuristicTests extends ESTestCase {
|
|||
return heuristics.get(randomInt(3));
|
||||
}
|
||||
|
||||
public void testReduce() {
|
||||
List<InternalAggregation> aggs = createInternalAggregations();
|
||||
SignificantTerms reducedAgg = (SignificantTerms) aggs.get(0).doReduce(aggs, null);
|
||||
assertThat(reducedAgg.getBuckets().size(), equalTo(2));
|
||||
assertThat(reducedAgg.getBuckets().get(0).getSubsetDf(), equalTo(8l));
|
||||
assertThat(reducedAgg.getBuckets().get(0).getSubsetSize(), equalTo(16l));
|
||||
assertThat(reducedAgg.getBuckets().get(0).getSupersetDf(), equalTo(10l));
|
||||
assertThat(reducedAgg.getBuckets().get(0).getSupersetSize(), equalTo(30l));
|
||||
assertThat(reducedAgg.getBuckets().get(1).getSubsetDf(), equalTo(8l));
|
||||
assertThat(reducedAgg.getBuckets().get(1).getSubsetSize(), equalTo(16l));
|
||||
assertThat(reducedAgg.getBuckets().get(1).getSupersetDf(), equalTo(10l));
|
||||
assertThat(reducedAgg.getBuckets().get(1).getSupersetSize(), equalTo(30l));
|
||||
}
|
||||
|
||||
// Create aggregations as they might come from three different shards and return as list.
|
||||
private List<InternalAggregation> createInternalAggregations() {
|
||||
|
||||
String type = randomBoolean() ? "long" : "string";
|
||||
SignificanceHeuristic significanceHeuristic = getRandomSignificanceheuristic();
|
||||
|
||||
List<InternalAggregation> aggs = new ArrayList<>();
|
||||
List<InternalSignificantTerms.Bucket> terms0Buckets = new ArrayList<>();
|
||||
terms0Buckets.add(createBucket(type, 4, 4, 5, 10, 0));
|
||||
aggs.add(createAggregation(type, significanceHeuristic, terms0Buckets, 4, 10));
|
||||
List<InternalSignificantTerms.Bucket> terms1Buckets = new ArrayList<>();
|
||||
terms1Buckets.add(createBucket(type, 4, 4, 5, 10, 1));
|
||||
aggs.add(createAggregation(type, significanceHeuristic, terms1Buckets, 4, 10));
|
||||
List<InternalSignificantTerms.Bucket> terms01Buckets = new ArrayList<>();
|
||||
terms01Buckets.add(createBucket(type, 4, 8, 5, 10, 0));
|
||||
terms01Buckets.add(createBucket(type, 4, 8, 5, 10, 1));
|
||||
aggs.add(createAggregation(type, significanceHeuristic, terms01Buckets, 8, 10));
|
||||
return aggs;
|
||||
}
|
||||
|
||||
private InternalSignificantTerms createAggregation(String type, SignificanceHeuristic significanceHeuristic, List<InternalSignificantTerms.Bucket> buckets, long subsetSize, long supersetSize) {
|
||||
if (type.equals("string")) {
|
||||
return new SignificantStringTerms(subsetSize, supersetSize, "sig_terms", 2, -1, significanceHeuristic, buckets, new ArrayList<PipelineAggregator>(), new HashMap<String, Object>());
|
||||
} else {
|
||||
return new SignificantLongTerms(subsetSize, supersetSize, "sig_terms", ValueFormatter.RAW, 2, -1, significanceHeuristic, buckets, new ArrayList<PipelineAggregator>(), new HashMap<String, Object>());
|
||||
}
|
||||
}
|
||||
|
||||
private InternalSignificantTerms.Bucket createBucket(String type, long subsetDF, long subsetSize, long supersetDF, long supersetSize, long label) {
|
||||
if (type.equals("string")) {
|
||||
return new SignificantStringTerms.Bucket(new BytesRef(Long.toString(label).getBytes(StandardCharsets.UTF_8)), subsetDF, subsetSize, supersetDF, supersetSize, InternalAggregations.EMPTY);
|
||||
} else {
|
||||
return new SignificantLongTerms.Bucket(subsetDF, subsetSize, supersetDF, supersetSize, label, InternalAggregations.EMPTY, ValueFormatter.RAW);
|
||||
}
|
||||
}
|
||||
|
||||
// test that
|
||||
// 1. The output of the builders can actually be parsed
|
||||
// 2. The parser does not swallow parameters after a significance heuristic was defined
|
||||
|
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -149,6 +149,16 @@ def start_node(version, release_dir, data_dir, repo_dir, tcp_port=DEFAULT_TRANSP
|
|||
cmd.append('-f') # versions before 1.0 start in the background automatically
|
||||
return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
def install_plugin(version, release_dir, plugin_name):
|
||||
run_plugin(version, release_dir, 'install', [plugin_name])
|
||||
|
||||
def remove_plugin(version, release_dir, plugin_name):
|
||||
run_plugin(version, release_dir, 'remove', [plugin_name])
|
||||
|
||||
def run_plugin(version, release_dir, plugin_cmd, args):
|
||||
cmd = [os.path.join(release_dir, 'bin/plugin'), plugin_cmd] + args
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
def create_client(http_port=DEFAULT_HTTP_TCP_PORT, timeout=30):
|
||||
logging.info('Waiting for node to startup')
|
||||
for _ in range(0, timeout):
|
||||
|
|
|
@ -0,0 +1,124 @@
|
|||
import create_bwc_index
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
def fetch_version(version):
|
||||
logging.info('fetching ES version %s' % version)
|
||||
if subprocess.call([sys.executable, os.path.join(os.path.split(sys.argv[0])[0], 'get-bwc-version.py'), version]) != 0:
|
||||
raise RuntimeError('failed to download ES version %s' % version)
|
||||
|
||||
def create_index(plugin, mapping, docs):
|
||||
'''
|
||||
Creates a static back compat index (.zip) with mappings using fields defined in plugins.
|
||||
'''
|
||||
|
||||
logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO,
|
||||
datefmt='%Y-%m-%d %I:%M:%S %p')
|
||||
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
|
||||
logging.getLogger('urllib3').setLevel(logging.WARN)
|
||||
|
||||
tmp_dir = tempfile.mkdtemp()
|
||||
plugin_installed = False
|
||||
node = None
|
||||
try:
|
||||
data_dir = os.path.join(tmp_dir, 'data')
|
||||
repo_dir = os.path.join(tmp_dir, 'repo')
|
||||
logging.info('Temp data dir: %s' % data_dir)
|
||||
logging.info('Temp repo dir: %s' % repo_dir)
|
||||
|
||||
version = '2.0.0'
|
||||
classifier = '%s-%s' %(plugin, version)
|
||||
index_name = 'index-%s' % classifier
|
||||
|
||||
# Download old ES releases if necessary:
|
||||
release_dir = os.path.join('backwards', 'elasticsearch-%s' % version)
|
||||
if not os.path.exists(release_dir):
|
||||
fetch_version(version)
|
||||
|
||||
create_bwc_index.install_plugin(version, release_dir, plugin)
|
||||
plugin_installed = True
|
||||
node = create_bwc_index.start_node(version, release_dir, data_dir, repo_dir, cluster_name=index_name)
|
||||
client = create_bwc_index.create_client()
|
||||
put_plugin_mappings(client, index_name, mapping, docs)
|
||||
create_bwc_index.shutdown_node(node)
|
||||
|
||||
print('%s server output:\n%s' % (version, node.stdout.read().decode('utf-8')))
|
||||
node = None
|
||||
create_bwc_index.compress_index(classifier, tmp_dir, 'plugins/%s/src/test/resources/indices/bwc' %plugin)
|
||||
finally:
|
||||
if node is not None:
|
||||
create_bwc_index.shutdown_node(node)
|
||||
if plugin_installed:
|
||||
create_bwc_index.remove_plugin(version, release_dir, plugin)
|
||||
shutil.rmtree(tmp_dir)
|
||||
|
||||
def put_plugin_mappings(client, index_name, mapping, docs):
|
||||
client.indices.delete(index=index_name, ignore=404)
|
||||
logging.info('Create single shard test index')
|
||||
|
||||
client.indices.create(index=index_name, body={
|
||||
'settings': {
|
||||
'number_of_shards': 1,
|
||||
'number_of_replicas': 0
|
||||
},
|
||||
'mappings': {
|
||||
'type': mapping
|
||||
}
|
||||
})
|
||||
health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
|
||||
assert health['timed_out'] == False, 'cluster health timed out %s' % health
|
||||
|
||||
logging.info('Indexing documents')
|
||||
for i in range(len(docs)):
|
||||
client.index(index=index_name, doc_type="type", id=str(i), body=docs[i])
|
||||
logging.info('Flushing index')
|
||||
client.indices.flush(index=index_name)
|
||||
|
||||
logging.info('Running basic checks')
|
||||
count = client.count(index=index_name)['count']
|
||||
assert count == len(docs), "expected %d docs, got %d" %(len(docs), count)
|
||||
|
||||
def main():
|
||||
docs = [
|
||||
{
|
||||
"foo": "abc"
|
||||
},
|
||||
{
|
||||
"foo": "abcdef"
|
||||
},
|
||||
{
|
||||
"foo": "a"
|
||||
}
|
||||
]
|
||||
|
||||
murmur3_mapping = {
|
||||
'properties': {
|
||||
'foo': {
|
||||
'type': 'string',
|
||||
'fields': {
|
||||
'hash': {
|
||||
'type': 'murmur3'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
create_index("mapper-murmur3", murmur3_mapping, docs)
|
||||
|
||||
size_mapping = {
|
||||
'_size': {
|
||||
'enabled': True
|
||||
}
|
||||
}
|
||||
|
||||
create_index("mapper-size", size_mapping, docs)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|
@ -356,7 +356,7 @@ if __name__ == "__main__":
|
|||
debs3_list_cmd = 'deb-s3 list -b %s --prefix %s' % (bucket, debs3_prefix)
|
||||
debs3_verify_cmd = 'deb-s3 verify -b %s --prefix %s' % (bucket, debs3_prefix)
|
||||
rpms3_prefix = 'elasticsearch/staging/%s-%s/repos/%s/centos' % (release_version, shortHash, package_repo_version)
|
||||
rpms3_upload_cmd = 'rpm-s3 -v -b %s -p %s --sign --visibility public-read -k 0 %s' % (bucket, rpms3_prefix, rpm)
|
||||
rpms3_upload_cmd = 'rpm-s3 -v -b %s -p %s --sign --visibility public-read -k 100 %s' % (bucket, rpms3_prefix, rpm)
|
||||
|
||||
if deploy_s3:
|
||||
run(s3cmd_sync_to_staging_bucket_cmd)
|
||||
|
|
|
@ -157,7 +157,7 @@ implementation used for these two methods, while not changing the
|
|||
`default`, it is possible to configure a similarity with the name
|
||||
`base`. This similarity will then be used for the two methods.
|
||||
|
||||
You can change the default similarity for all fields like this:
|
||||
You can change the default similarity for all fields by putting the following setting into `elasticsearch.yml`:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
|
|
|
@ -7,8 +7,8 @@
|
|||
:jdk: 1.8.0_25
|
||||
:defguide: https://www.elastic.co/guide/en/elasticsearch/guide/current
|
||||
:plugins: https://www.elastic.co/guide/en/elasticsearch/plugins/master
|
||||
:issue: https://github.com/elastic/elasticsearch/issues
|
||||
:pull: https://github.com/elastic/elasticsearch/pull
|
||||
:issue: https://github.com/elastic/elasticsearch/issues/
|
||||
:pull: https://github.com/elastic/elasticsearch/pull/
|
||||
|
||||
include::getting-started.asciidoc[]
|
||||
|
||||
|
|
|
@ -62,7 +62,7 @@ PUT my_index
|
|||
"my_type": {
|
||||
"_ttl": {
|
||||
"enabled": true,
|
||||
"defaut": "5m"
|
||||
"default": "5m"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.mapper.murmur3;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.plugin.mapper.MapperMurmur3Plugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.search.aggregations.AggregationBuilders;
|
||||
import org.elasticsearch.search.aggregations.metrics.cardinality.Cardinality;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
|
||||
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
|
||||
@LuceneTestCase.SuppressFileSystems("ExtrasFS")
|
||||
public class Murmur3FieldMapperUpgradeTests extends ESIntegTestCase {
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> nodePlugins() {
|
||||
return Collections.singleton(MapperMurmur3Plugin.class);
|
||||
}
|
||||
|
||||
public void testUpgradeOldMapping() throws IOException {
|
||||
final String indexName = "index-mapper-murmur3-2.0.0";
|
||||
Path unzipDir = createTempDir();
|
||||
Path unzipDataDir = unzipDir.resolve("data");
|
||||
Path backwardsIndex = getBwcIndicesPath().resolve(indexName + ".zip");
|
||||
try (InputStream stream = Files.newInputStream(backwardsIndex)) {
|
||||
TestUtil.unzip(stream, unzipDir);
|
||||
}
|
||||
assertTrue(Files.exists(unzipDataDir));
|
||||
|
||||
Path dataPath = createTempDir();
|
||||
Settings settings = Settings.builder()
|
||||
.put("path.data", dataPath)
|
||||
.build();
|
||||
final String node = internalCluster().startNode(settings);
|
||||
Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, node).nodeDataPaths();
|
||||
assertEquals(1, nodePaths.length);
|
||||
dataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);
|
||||
assertFalse(Files.exists(dataPath));
|
||||
Path src = unzipDataDir.resolve(indexName + "/nodes/0/indices");
|
||||
Files.move(src, dataPath);
|
||||
|
||||
ensureYellow();
|
||||
final SearchResponse countResponse = client().prepareSearch(indexName).setSize(0).get();
|
||||
ElasticsearchAssertions.assertHitCount(countResponse, 3L);
|
||||
|
||||
final SearchResponse cardinalityResponse = client().prepareSearch(indexName).addAggregation(
|
||||
AggregationBuilders.cardinality("card").field("foo.hash")).get();
|
||||
Cardinality cardinality = cardinalityResponse.getAggregations().get("card");
|
||||
assertEquals(3L, cardinality.getValue());
|
||||
}
|
||||
|
||||
}
|
Binary file not shown.
|
@ -0,0 +1,93 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.mapper.size;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.plugin.mapper.MapperSizePlugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.SearchHitField;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
|
||||
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
|
||||
@LuceneTestCase.SuppressFileSystems("ExtrasFS")
|
||||
public class SizeFieldMapperUpgradeTests extends ESIntegTestCase {
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> nodePlugins() {
|
||||
return Collections.singleton(MapperSizePlugin.class);
|
||||
}
|
||||
|
||||
public void testUpgradeOldMapping() throws IOException {
|
||||
final String indexName = "index-mapper-size-2.0.0";
|
||||
Path unzipDir = createTempDir();
|
||||
Path unzipDataDir = unzipDir.resolve("data");
|
||||
Path backwardsIndex = getBwcIndicesPath().resolve(indexName + ".zip");
|
||||
try (InputStream stream = Files.newInputStream(backwardsIndex)) {
|
||||
TestUtil.unzip(stream, unzipDir);
|
||||
}
|
||||
assertTrue(Files.exists(unzipDataDir));
|
||||
|
||||
Path dataPath = createTempDir();
|
||||
Settings settings = Settings.builder()
|
||||
.put("path.data", dataPath)
|
||||
.build();
|
||||
final String node = internalCluster().startNode(settings);
|
||||
Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, node).nodeDataPaths();
|
||||
assertEquals(1, nodePaths.length);
|
||||
dataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);
|
||||
assertFalse(Files.exists(dataPath));
|
||||
Path src = unzipDataDir.resolve(indexName + "/nodes/0/indices");
|
||||
Files.move(src, dataPath);
|
||||
|
||||
ensureYellow();
|
||||
final SearchResponse countResponse = client().prepareSearch(indexName).setSize(0).get();
|
||||
ElasticsearchAssertions.assertHitCount(countResponse, 3L);
|
||||
|
||||
final SearchResponse sizeResponse = client().prepareSearch(indexName)
|
||||
.addField("_source")
|
||||
.addField("_size")
|
||||
.get();
|
||||
ElasticsearchAssertions.assertHitCount(sizeResponse, 3L);
|
||||
for (SearchHit hit : sizeResponse.getHits().getHits()) {
|
||||
String source = hit.getSourceAsString();
|
||||
assertNotNull(source);
|
||||
Map<String, SearchHitField> fields = hit.getFields();
|
||||
assertTrue(fields.containsKey("_size"));
|
||||
Number size = fields.get("_size").getValue();
|
||||
assertNotNull(size);
|
||||
assertEquals(source.length(), size.longValue());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
Binary file not shown.
|
@ -25,10 +25,9 @@ ext.pluginCount = 0
|
|||
for (Project subproj : project.rootProject.subprojects) {
|
||||
if (subproj.path.startsWith(':plugins:')) {
|
||||
integTest {
|
||||
def bundlePlugin = subproj.tasks.findByName('bundlePlugin')
|
||||
dependsOn bundlePlugin
|
||||
cluster {
|
||||
plugin subproj.name, bundlePlugin.outputs.files
|
||||
// need to get a non-decorated project object, so must re-lookup the project by path
|
||||
plugin subproj.name, project(subproj.path)
|
||||
}
|
||||
}
|
||||
pluginCount += 1
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
---
|
||||
"REST test with headers":
|
||||
- skip:
|
||||
features: headers
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: test_1
|
||||
type: test
|
||||
id: 1
|
||||
body: { "body": "foo" }
|
||||
|
||||
- do:
|
||||
headers:
|
||||
Content-Type: application/yaml
|
||||
get:
|
||||
index: test_1
|
||||
type: _all
|
||||
id: 1
|
||||
|
||||
- match:
|
||||
$body: |
|
||||
/^---\n
|
||||
_index:\s+\"test_1"\n
|
||||
_type:\s+"test"\n
|
||||
_id:\s+"1"\n
|
||||
_version:\s+1\n
|
||||
found:\s+true\n
|
||||
_source:\n
|
||||
\s+body:\s+"foo"\n$/
|
|
@ -62,7 +62,8 @@ public class RestTestExecutionContext implements Closeable {
|
|||
* Saves the obtained response in the execution context.
|
||||
* @throws RestException if the returned status code is non ok
|
||||
*/
|
||||
public RestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies) throws IOException, RestException {
|
||||
public RestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies,
|
||||
Map<String, String> headers) throws IOException, RestException {
|
||||
//makes a copy of the parameters before modifying them for this specific request
|
||||
HashMap<String, String> requestParams = new HashMap<>(params);
|
||||
for (Map.Entry<String, String> entry : requestParams.entrySet()) {
|
||||
|
@ -74,7 +75,7 @@ public class RestTestExecutionContext implements Closeable {
|
|||
String body = actualBody(bodies);
|
||||
|
||||
try {
|
||||
response = callApiInternal(apiName, requestParams, body);
|
||||
response = callApiInternal(apiName, requestParams, body, headers);
|
||||
//we always stash the last response body
|
||||
stash.stashValue("body", response.getBody());
|
||||
return response;
|
||||
|
@ -104,8 +105,8 @@ public class RestTestExecutionContext implements Closeable {
|
|||
return XContentFactory.jsonBuilder().map(body).string();
|
||||
}
|
||||
|
||||
private RestResponse callApiInternal(String apiName, Map<String, String> params, String body) throws IOException, RestException {
|
||||
return restClient.callApi(apiName, params, body);
|
||||
private RestResponse callApiInternal(String apiName, Map<String, String> params, String body, Map<String, String> headers) throws IOException, RestException {
|
||||
return restClient.callApi(apiName, params, body, headers);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -132,7 +132,7 @@ public class RestClient implements Closeable {
|
|||
* @throws RestException if the obtained status code is non ok, unless the specific error code needs to be ignored
|
||||
* according to the ignore parameter received as input (which won't get sent to elasticsearch)
|
||||
*/
|
||||
public RestResponse callApi(String apiName, Map<String, String> params, String body) throws IOException, RestException {
|
||||
public RestResponse callApi(String apiName, Map<String, String> params, String body, Map<String, String> headers) throws IOException, RestException {
|
||||
|
||||
List<Integer> ignores = new ArrayList<>();
|
||||
Map<String, String> requestParams = null;
|
||||
|
@ -151,6 +151,9 @@ public class RestClient implements Closeable {
|
|||
}
|
||||
|
||||
HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body);
|
||||
for (Map.Entry<String, String> header : headers.entrySet()) {
|
||||
httpRequestBuilder.addHeader(header.getKey(), header.getValue());
|
||||
}
|
||||
logger.debug("calling api [{}]", apiName);
|
||||
HttpResponse httpResponse = httpRequestBuilder.execute();
|
||||
|
||||
|
|
|
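With the widened signature, every caller supplies a headers map next to params and body, passing an empty map when a test declares no headers section. A hedged usage sketch follows; the api name, parameter values and import locations are assumptions taken from the YAML example earlier in this change.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.test.rest.client.RestClient;
import org.elasticsearch.test.rest.client.RestResponse;

// Illustrative only: fetches a document while asking for a YAML response body.
final class HeaderedCallExample {
    static RestResponse getAsYaml(RestClient restClient) throws Exception {
        Map<String, String> params = new HashMap<>();
        params.put("index", "test_1");
        params.put("type", "_all");
        params.put("id", "1");

        Map<String, String> headers = new HashMap<>();
        headers.put("Content-Type", "application/yaml");

        // body is null for a GET; the headers are added to the underlying HTTP request
        return restClient.callApi("get", params, null, headers);
    }
}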
@ -25,6 +25,8 @@ import org.elasticsearch.test.rest.section.ApiCallSection;
|
|||
import org.elasticsearch.test.rest.section.DoSection;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Parser for do sections
|
||||
|
@ -40,6 +42,8 @@ public class DoSectionParser implements RestTestFragmentParser<DoSection> {
|
|||
XContentParser.Token token;
|
||||
|
||||
DoSection doSection = new DoSection();
|
||||
ApiCallSection apiCallSection = null;
|
||||
Map<String, String> headers = new HashMap<>();
|
||||
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
|
@ -49,8 +53,17 @@ public class DoSectionParser implements RestTestFragmentParser<DoSection> {
|
|||
doSection.setCatch(parser.text());
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if (currentFieldName != null) {
|
||||
ApiCallSection apiCallSection = new ApiCallSection(currentFieldName);
|
||||
if ("headers".equals(currentFieldName)) {
|
||||
String headerName = null;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
headerName = parser.currentName();
|
||||
} else if (token.isValue()) {
|
||||
headers.put(headerName, parser.text());
|
||||
}
|
||||
}
|
||||
} else if (currentFieldName != null) { // must be part of API call then
|
||||
apiCallSection = new ApiCallSection(currentFieldName);
|
||||
String paramName = null;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
|
@@ -73,17 +86,20 @@ public class DoSectionParser implements RestTestFragmentParser<DoSection> {
                        }
                    }
                }
                doSection.setApiCallSection(apiCallSection);
                }
            }
        }

        parser.nextToken();

        if (doSection.getApiCallSection() == null) {
        try {
            if (apiCallSection == null) {
                throw new RestTestParseException("client call section is mandatory within a do section");
            }

            if (headers.isEmpty() == false) {
                apiCallSection.addHeaders(headers);
            }
            doSection.setApiCallSection(apiCallSection);
        } finally {
            parser.nextToken();
        }
        return doSection;
    }
}

@@ -33,6 +33,7 @@ public class ApiCallSection {

    private final String api;
    private final Map<String, String> params = new HashMap<>();
    private final Map<String, String> headers = new HashMap<>();
    private final List<Map<String, Object>> bodies = new ArrayList<>();

    public ApiCallSection(String api) {
@@ -56,6 +57,18 @@ public class ApiCallSection {
        this.params.put(key, value);
    }

    public void addHeaders(Map<String, String> otherHeaders) {
        this.headers.putAll(otherHeaders);
    }

    public void addHeader(String key, String value) {
        this.headers.put(key, value);
    }

    public Map<String, String> getHeaders() {
        return unmodifiableMap(headers);
    }

    public List<Map<String, Object>> getBodies() {
        return Collections.unmodifiableList(bodies);
    }

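Note: ApiCallSection accumulates headers in a private mutable map and exposes only an unmodifiable view through getHeaders(). A small self-contained sketch of that accessor pattern; the holder class below is a stand-in for illustration, not the real ApiCallSection:

// Standalone sketch of the pattern above: mutate internally, hand out a
// read-only view. Class and values are illustrative only.
import static java.util.Collections.unmodifiableMap;
import java.util.HashMap;
import java.util.Map;

public class HeaderHolderSketch {
    private final Map<String, String> headers = new HashMap<>();

    public void addHeader(String key, String value) {
        this.headers.put(key, value);
    }

    public Map<String, String> getHeaders() {
        return unmodifiableMap(headers);
    }

    public static void main(String[] args) {
        HeaderHolderSketch holder = new HeaderHolderSketch();
        holder.addHeader("Authorization", "Basic dXNlcjpwYXNz"); // made-up value
        System.out.println(holder.getHeaders());
        // holder.getHeaders().put("X", "Y"); // would throw UnsupportedOperationException
    }
}
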
@@ -45,6 +45,9 @@ import static org.junit.Assert.fail;
 *
 * - do:
 *     catch: missing
 *     headers:
 *         Authorization: Basic user:pass
 *         Content-Type: application/json
 *     update:
 *         index: test_1
 *         type: test
@@ -86,7 +89,8 @@ public class DoSection implements ExecutableSection {
        }

        try {
            RestResponse restResponse = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), apiCallSection.getBodies());
            RestResponse restResponse = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(),
                    apiCallSection.getBodies(), apiCallSection.getHeaders());
            if (Strings.hasLength(catchParam)) {
                String catchStatusCode;
                if (catches.containsKey(catchParam)) {

@@ -34,7 +34,7 @@ import java.util.List;
 */
public final class Features {

    private static final List<String> SUPPORTED = Arrays.asList("stash_in_path", "groovy_scripting");
    private static final List<String> SUPPORTED = Arrays.asList("stash_in_path", "groovy_scripting", "headers");

    private Features() {

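Note: adding "headers" to SUPPORTED lets a REST test declare the capability (typically in its skip section, e.g. `features: headers`) so runners without header support skip it instead of failing. A self-contained sketch of that gating check; the class and method below are illustrative, not necessarily the real Features API:

// Illustrative feature-gating check: a test runs only if every feature it
// requests is in the runner's supported list. Names here are invented.
import java.util.Arrays;
import java.util.List;

public class FeatureCheckSketch {
    private static final List<String> SUPPORTED =
            Arrays.asList("stash_in_path", "groovy_scripting", "headers");

    static boolean areAllSupported(List<String> requested) {
        return SUPPORTED.containsAll(requested);
    }

    public static void main(String[] args) {
        System.out.println(areAllSupported(Arrays.asList("headers")));   // true -> run
        System.out.println(areAllSupported(Arrays.asList("benchmark"))); // false -> skip
    }
}
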
@@ -341,6 +341,29 @@ public class DoSectionParserTests extends AbstractParserTestCase {
        assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
    }

    public void testParseDoSectionWithHeaders() throws Exception {
        parser = YamlXContent.yamlXContent.createParser(
                "headers:\n" +
                " Authorization: \"thing one\"\n" +
                " Content-Type: \"application/json\"\n" +
                "indices.get_warmer:\n" +
                " index: test_index\n" +
                " name: test_warmer"
        );

        DoSectionParser doSectionParser = new DoSectionParser();
        DoSection doSection = doSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser));

        assertThat(doSection.getApiCallSection(), notNullValue());
        assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_warmer"));
        assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2));
        assertThat(doSection.getApiCallSection().hasBody(), equalTo(false));
        assertThat(doSection.getApiCallSection().getHeaders(), notNullValue());
        assertThat(doSection.getApiCallSection().getHeaders().size(), equalTo(2));
        assertThat(doSection.getApiCallSection().getHeaders().get("Authorization"), equalTo("thing one"));
        assertThat(doSection.getApiCallSection().getHeaders().get("Content-Type"), equalTo("application/json"));
    }

    public void testParseDoSectionWithoutClientCallSection() throws Exception {
        parser = YamlXContent.yamlXContent.createParser(
                "catch: missing\n"