Merge branch 'master' into standalone_tests_intellij

This commit is contained in:
Ryan Ernst 2015-12-07 16:01:32 -08:00
commit fa1c708ccd
434 changed files with 7973 additions and 4891 deletions

.editorconfig (new file)

@ -0,0 +1,10 @@
# EditorConfig: http://editorconfig.org/
root = true
[*.java]
charset = utf-8
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true

TESTING.asciidoc

@ -149,17 +149,23 @@ gradle test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix)
=== Load balancing and caches.

By default, the tests run on up to 4 JVMs based on the number of cores. If you
want to explicitly specify the number of JVMs you can do so on the command
line:

----------------------------
gradle test -Dtests.jvms=8
----------------------------

Or in `~/.gradle/gradle.properties`:

----------------------------
systemProp.tests.jvms=8
----------------------------

It's difficult to pick the "right" number here. Hypercores don't count for CPU
intensive tests and you should leave some slack for JVM-internal threads like
the garbage collector. And you have to have enough RAM to handle each JVM.
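
As a purely illustrative example (these numbers are not from this document): on a
machine with 8 physical cores and ample RAM you might settle slightly below the
physical core count to leave that slack, for instance:

----------------------------
gradle test -Dtests.jvms=6
----------------------------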
=== Test compatibility.
@ -280,11 +286,20 @@ The REST layer is tested through specific tests that are shared between all
the elasticsearch official clients and consist of YAML files that describe the
operations to be executed and the obtained results that need to be tested.
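
To give a feel for the format (this sketch is illustrative and not taken from this
commit; the index name and expected value are made up), each YAML file contains named
tests built from `do` steps that call an API and assertions such as `match` on the
response:

---------------------------------------------------------------------------
"Basic count":
  - do:
      count:
        index: test_index
  - match: { count: 0 }
---------------------------------------------------------------------------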
The REST tests are run automatically when executing the "gradle check" command. To run only the
REST tests use the following command:

---------------------------------------------------------------------------
gradle :distribution:tar:integTest \
-Dtests.class=org.elasticsearch.test.rest.RestIT
---------------------------------------------------------------------------
A specific test case can be run with
---------------------------------------------------------------------------
gradle :distribution:tar:integTest \
-Dtests.class=org.elasticsearch.test.rest.RestIT \
-Dtests.method="test {p0=cat.shards/10_basic/Help}"
---------------------------------------------------------------------------

`RestNIT` are the executable test classes that run all the

build.gradle

@ -45,7 +45,7 @@ subprojects {
}
}
}
}
extraArchive {
  javadoc = true
  tests = false
@ -86,8 +86,8 @@ subprojects {
tasks.withType(Jar) {
  into('META-INF') {
    from project.rootProject.rootDir
    include 'LICENSE.txt'
    include 'NOTICE.txt'
  }
}
// ignore missing javadocs
@ -101,12 +101,19 @@ subprojects {
}
}

/* Sets up the dependencies that we build as part of this project but
   register as though they were external to resolve internally. We register
   them as external dependencies so the build plugin that we use can be used
   to build elasticsearch plugins outside of the elasticsearch source tree. */
ext.projectSubstitutions = [
  "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
  "org.elasticsearch:elasticsearch:${version}": ':core',
  "org.elasticsearch:test-framework:${version}": ':test-framework',
  "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
  "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
  "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
  "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
  "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
]

configurations.all {
  resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->
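
As a sketch of what that substitution wiring amounts to (the closure body is cut off
by this hunk, so the exact code is an assumption; the DependencySubstitutions calls
below are standard Gradle API), each external coordinate in the map above is redirected
to its project path:

  // Illustrative only: roughly how each map entry can be turned into a project substitution.
  projectSubstitutions.each { String module, String projectPath ->
    subs.substitute(subs.module(module)).with(subs.project(projectPath))
  }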
@ -232,7 +239,7 @@ class Run extends DefaultTask {
)
public void setDebug(boolean enabled) {
  project.project(':distribution').run.clusterConfig.debug = enabled
}
}
task run(type: Run) {
  dependsOn ':distribution:run'
@ -240,4 +247,3 @@ task run(type: Run) {
  group = 'Verification'
  impliesSubProjects = true
}

buildSrc/build.gradle

@ -80,3 +80,13 @@ eclipse {
    defaultOutputDir = new File(file('build'), 'eclipse')
  }
}
task copyEclipseSettings(type: Copy) {
from project.file('src/main/resources/eclipse.settings')
into '.settings'
}
// otherwise .settings is not nuked entirely
tasks.cleanEclipse {
delete '.settings'
}
tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)

TestLoggingConfiguration.groovy

@ -1,5 +1,6 @@
package com.carrotsearch.gradle.junit4

import org.gradle.api.tasks.Input
import org.gradle.util.ConfigureUtil

class TestLoggingConfiguration {
@ -20,6 +21,10 @@ class TestLoggingConfiguration {
  SlowTestsConfiguration slowTests = new SlowTestsConfiguration()
  StackTraceFiltersConfiguration stackTraceFilters = new StackTraceFiltersConfiguration()

  /** Summarize the first N failures at the end of the test. */
  @Input
  int showNumFailuresAtEnd = 3 // match TextReport default

  void slowTests(Closure closure) {
    ConfigureUtil.configure(closure, slowTests)
  }
@ -31,4 +36,8 @@ class TestLoggingConfiguration {
  void outputMode(String mode) {
    outputMode = mode.toUpperCase() as OutputMode
  }

  void showNumFailuresAtEnd(int n) {
    showNumFailuresAtEnd = n
  }
}

TestReportLogger.groovy

@ -48,9 +48,6 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
  /** Format line for JVM ID string. */
  String jvmIdFormat

  /** Summarize the first N failures at the end. */
  int showNumFailuresAtEnd = 3

  /** Output stream that logs messages to the given logger */
  LoggingOutputStream outStream
  LoggingOutputStream errStream
@ -110,13 +107,13 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
  @Subscribe
  void onQuit(AggregatedQuitEvent e) throws IOException {
    if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) {
      List<Description> sublist = this.failedTests
      StringBuilder b = new StringBuilder()
      b.append('Tests with failures')
      if (sublist.size() > config.showNumFailuresAtEnd) {
        sublist = sublist.subList(0, config.showNumFailuresAtEnd)
        b.append(" (first " + config.showNumFailuresAtEnd + " out of " + failedTests.size() + ")")
      }
      b.append(':\n')
      for (Description description : sublist) {

BuildPlugin.groovy

@ -62,7 +62,7 @@ class BuildPlugin implements Plugin<Project> {
    configureCompile(project)
    configureTest(project)
    configurePrecommit(project)
  }

  /** Performs checks on the build environment and prints information about the build environment. */
@ -283,6 +283,7 @@ class BuildPlugin implements Plugin<Project> {
  /** Adds compiler settings to the project */
  static void configureCompile(Project project) {
    project.ext.compactProfile = 'compact3'
    project.afterEvaluate {
      // fail on all javac warnings
      project.tasks.withType(JavaCompile) {
@ -295,6 +296,11 @@ class BuildPlugin implements Plugin<Project> {
         */
        // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
        options.compilerArgs << '-Werror' << '-Xlint:all,-path' << '-Xdoclint:all' << '-Xdoclint:-missing'
        // compile with compact 3 profile by default
        // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
        if (project.compactProfile != 'full') {
          options.compilerArgs << '-profile' << project.compactProfile
        }
        options.encoding = 'UTF-8'
      }
    }
@ -365,6 +371,7 @@ class BuildPlugin implements Plugin<Project> {
      enableSystemAssertions false

      testLogging {
        showNumFailuresAtEnd 25
        slowTests {
          heartbeat 10
          summarySize 5
@ -409,4 +416,11 @@ class BuildPlugin implements Plugin<Project> {
    }
    return test
  }
private static configurePrecommit(Project project) {
Task precommit = PrecommitTasks.create(project, true)
project.check.dependsOn(precommit)
project.test.mustRunAfter(precommit)
project.dependencyLicenses.dependencies = project.configurations.runtime - project.configurations.provided
}
}

PluginBuildPlugin.groovy

@ -23,40 +23,41 @@ import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip

/**
 * Encapsulates build configuration for an Elasticsearch plugin.
 */
public class PluginBuildPlugin extends BuildPlugin {

    @Override
    public void apply(Project project) {
        super.apply(project)
        configureDependencies(project)
        // this afterEvaluate must happen before the afterEvaluate added by integTest creation,
        // so that the file name resolution for installing the plugin will be setup
        project.afterEvaluate {
            String name = project.pluginProperties.extension.name
            project.jar.baseName = name
            project.bundlePlugin.baseName = name
            project.integTest.dependsOn(project.bundlePlugin)
            project.tasks.run.dependsOn(project.bundlePlugin)
            if (project.path.startsWith(':modules:')) {
                project.integTest.clusterConfig.module(project)
                project.tasks.run.clusterConfig.module(project)
            } else {
                project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
                project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
            }
        }
        createIntegTestTask(project)
        createBundleTask(project)
        project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
    }

    private static void configureDependencies(Project project) {
        project.dependencies {
            provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
            testCompile "org.elasticsearch:test-framework:${project.versions.elasticsearch}"
@ -72,21 +73,36 @@ class PluginBuildPlugin extends BuildPlugin {
        }
    }

    /** Adds an integTest task which runs rest tests */
    private static void createIntegTestTask(Project project) {
        RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
        integTest.mustRunAfter(project.precommit, project.test)
        project.check.dependsOn(integTest)
    }

    /**
     * Adds a bundlePlugin task which builds the zip containing the plugin jars,
     * metadata, properties, and packaging files
     */
    private static void createBundleTask(Project project) {
        File pluginMetadata = project.file('src/main/plugin-metadata')

        // create a task to build the properties file for this plugin
        PluginPropertiesTask buildProperties = project.tasks.create('pluginProperties', PluginPropertiesTask.class)

        // add the plugin properties and metadata to test resources, so unit tests can
        // know about the plugin (used by test security code to statically initialize the plugin in unit tests)
        SourceSet testSourceSet = project.sourceSets.test
        testSourceSet.output.dir(buildProperties.generatedResourcesDir, builtBy: 'pluginProperties')
        testSourceSet.resources.srcDir(pluginMetadata)

        // create the actual bundle task, which zips up all the files for the plugin
        Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [project.jar, buildProperties]) {
            from buildProperties // plugin properties file
            from pluginMetadata // metadata (eg custom security policy)
            from project.jar // this plugin's jar
            from project.configurations.runtime - project.configurations.provided // the dep jars
            // extra files for the plugin to go into the zip
            from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging
            from('src/main') {
                include 'config/**'
@ -97,6 +113,13 @@ class PluginBuildPlugin extends BuildPlugin {
            }
        }
        project.assemble.dependsOn(bundle)

        // remove jar from the archives (things that will be published), and set it to the zip
        project.configurations.archives.artifacts.removeAll { it.archiveTask.is project.jar }
        project.artifacts.add('archives', bundle)
        // also make the zip the default artifact (used when depending on this project)
        project.configurations.getByName('default').extendsFrom = []
        project.artifacts.add('default', bundle)
    }
}

DependencyLicensesTask.groovy

@ -18,64 +18,104 @@
 */
package org.elasticsearch.gradle.precommit

import org.gradle.api.*
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputDirectory
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.TaskAction

import java.nio.file.Files
import java.security.MessageDigest
import java.util.regex.Matcher
import java.util.regex.Pattern

/**
* A task to check licenses for dependencies.
*
* There are two parts to the check:
* <ul>
* <li>LICENSE and NOTICE files</li>
* <li>SHA checksums for each dependency jar</li>
* </ul>
*
* The directory to find the license and sha files in defaults to the dir {@code licenses}
* in the project directory for this task. You can override this directory:
* <pre>
* dependencyLicenses {
* licensesDir = project.file('mybetterlicensedir')
* }
* </pre>
*
* The jar files to check default to the dependencies from the default configuration. You
* can override this, for example, to only check compile dependencies:
* <pre>
* dependencyLicenses {
* dependencies = project.configurations.compile
* }
* </pre>
*
* Every jar must have a {@code .sha1} file in the licenses dir. These can be managed
* automatically using the {@code updateShas} helper task that is created along
* with this task. It will add {@code .sha1} files for new jars that are in dependencies
* and remove old {@code .sha1} files that are no longer needed.
*
* Every jar must also have a LICENSE and NOTICE file. However, multiple jars can share
* LICENSE and NOTICE files by mapping a pattern to the same name.
* <pre>
* dependencyLicenses {
* mapping from: &#47;lucene-.*&#47;, to: 'lucene'
* }
* </pre>
*/
public class DependencyLicensesTask extends DefaultTask {
  static final String SHA_EXTENSION = '.sha1'

  // TODO: we should be able to default this to eg compile deps, but we need to move the licenses
  // check from distribution to core (ie this should only be run on java projects)
  /** A collection of jar files that should be checked. */
  @InputFiles
  public FileCollection dependencies

  /** The directory to find the license and sha files in. */
  @InputDirectory
  public File licensesDir = new File(project.projectDir, 'licenses')

  /** A map of patterns to prefix, used to find the LICENSE and NOTICE file. */
  private LinkedHashMap<String, String> mappings = new LinkedHashMap<>()

  /**
   * Add a mapping from a regex pattern for the jar name, to a prefix to find
   * the LICENSE and NOTICE file for that jar.
   */
  @Input
  public void mapping(Map<String, String> props) {
    String from = props.remove('from')
    if (from == null) {
      throw new InvalidUserDataException('Missing "from" setting for license name mapping')
    }
    String to = props.remove('to')
    if (to == null) {
      throw new InvalidUserDataException('Missing "to" setting for license name mapping')
    }
    if (props.isEmpty() == false) {
      throw new InvalidUserDataException("Unknown properties for mapping on dependencyLicenses: ${props.keySet()}")
    }
    mappings.put(from, to)
  }
  @TaskAction
  public void checkDependencies() {
    if (dependencies.isEmpty()) {
      if (licensesDir.exists()) {
        throw new GradleException("Licenses dir ${licensesDir} exists, but there are no dependencies")
      }
      return // no dependencies to check
    } else if (licensesDir.exists() == false) {
      throw new GradleException("Licenses dir ${licensesDir} does not exist, but there are dependencies")
    }

    // order is the same for keys and values iteration since we use a linked hashmap
    List<String> mapped = new ArrayList<>(mappings.values())
    Pattern mappingsPattern = Pattern.compile('(' + mappings.keySet().join(')|(') + ')')
@ -127,7 +167,7 @@ class DependencyLicensesTask extends DefaultTask {
    }
  }

  private void checkSha(File jar, String jarName, Set<File> shaFiles) {
    File shaFile = new File(licensesDir, jarName + SHA_EXTENSION)
    if (shaFile.exists() == false) {
      throw new GradleException("Missing SHA for ${jarName}. Run 'gradle updateSHAs' to create")
@ -143,7 +183,7 @@ class DependencyLicensesTask extends DefaultTask {
    shaFiles.remove(shaFile)
  }

  private void checkFile(String name, String jarName, Map<String, Integer> counters, String type) {
    String fileName = "${name}-${type}"
    Integer count = counters.get(fileName)
    if (count == null) {
@ -158,10 +198,12 @@ class DependencyLicensesTask extends DefaultTask {
    counters.put(fileName, count + 1)
  }

  /** A helper task to update the sha files in the license dir. */
  public static class UpdateShasTask extends DefaultTask {

    private DependencyLicensesTask parentTask

    @TaskAction
    public void updateShas() {
      Set<File> shaFiles = new HashSet<File>()
      parentTask.licensesDir.eachFile {
        String name = it.getName()

ForbiddenPatternsTask.groovy

@ -19,10 +19,11 @@
package org.elasticsearch.gradle.precommit

import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.TaskAction
import org.gradle.api.tasks.util.PatternFilterable
@ -33,14 +34,19 @@ import java.util.regex.Pattern
/**
 * Checks for patterns in source files for the project which are forbidden.
 */
public class ForbiddenPatternsTask extends DefaultTask {

  /** The rules: a map from the rule name, to a rule regex pattern. */
  private Map<String,String> patterns = new LinkedHashMap<>()
  /** A pattern set of which files should be checked. */
  private PatternFilterable filesFilter = new PatternSet()

  @OutputFile
  File outputMarker = new File(project.buildDir, "markers/forbiddenPatterns")

  public ForbiddenPatternsTask() {
    description = 'Checks source files for invalid patterns like nocommits or tabs'
    // we always include all source files, and exclude what should not be checked
    filesFilter.include('**')
    // exclude known binary extensions
@ -52,23 +58,28 @@ class ForbiddenPatternsTask extends DefaultTask {
    filesFilter.exclude('**/*.crt')
    filesFilter.exclude('**/*.png')

    // add mandatory rules
    patterns.put('nocommit', /nocommit/)
    patterns.put('tab', /\t/)
  }

  /** Adds a file glob pattern to be excluded */
  public void exclude(String... excludes) {
    this.filesFilter.exclude(excludes)
  }

  /** Adds a pattern to forbid. */
  void rule(Map<String,String> props) {
    String name = props.remove('name')
    if (name == null) {
      throw new InvalidUserDataException('Missing [name] for invalid pattern rule')
    }
    String pattern = props.remove('pattern')
    if (pattern == null) {
      throw new InvalidUserDataException('Missing [pattern] for invalid pattern rule')
    }
    if (props.isEmpty() == false) {
      throw new InvalidUserDataException("Unknown arguments for ForbiddenPatterns rule mapping: ${props.keySet()}")
    }
    // TODO: fail if pattern contains a newline, it won't work (currently)
    patterns.put(name, pattern)
@ -89,14 +100,14 @@ class ForbiddenPatternsTask extends DefaultTask {
    Pattern allPatterns = Pattern.compile('(' + patterns.values().join(')|(') + ')')
    List<String> failures = new ArrayList<>()
    for (File f : files()) {
      f.eachLine('UTF-8') { String line, int lineNumber ->
        if (allPatterns.matcher(line).find()) {
          addErrorMessages(failures, f, line, lineNumber)
        }
      }
    }
    if (failures.isEmpty() == false) {
      throw new GradleException('Found invalid patterns:\n' + failures.join('\n'))
    }
    outputMarker.setText('done', 'UTF-8')
  }

JarHellTask.groovy (new file)

@ -0,0 +1,62 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit
import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFile
import org.gradle.api.tasks.OutputFile
/**
* Runs CheckJarHell on a classpath.
*/
public class JarHellTask extends LoggedExec {
/**
* We use a simple "marker" file that we touch when the task succeeds
* as the task output. This is compared against the modified time of the
* inputs (ie the jars/class files).
*/
@OutputFile
public File successMarker = new File(project.buildDir, 'markers/jarHell')
/** The classpath to run jarhell check on, defaults to the test runtime classpath */
@InputFile
public FileCollection classpath = project.sourceSets.test.runtimeClasspath
public JarHellTask() {
project.afterEvaluate {
dependsOn(classpath)
description = "Runs CheckJarHell on ${classpath}"
executable = new File(project.javaHome, 'bin/java')
doFirst({
/* JarHell doesn't like getting directories that don't exist but
   gradle isn't especially careful about that. So we have to
   filter it ourselves. */
FileCollection taskClasspath = classpath.filter { it.exists() }
args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell')
})
doLast({
successMarker.parentFile.mkdirs()
successMarker.setText("", 'UTF-8')
})
}
}
}

PrecommitTasks.groovy

@ -18,16 +18,10 @@
 */
package org.elasticsearch.gradle.precommit

import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.plugins.JavaBasePlugin
/**
 * Validation tasks which should be run before committing. These run before tests.
@ -35,36 +29,34 @@ import org.gradle.api.tasks.TaskContainer
class PrecommitTasks {

  /** Adds a precommit task, which depends on non-test verification tasks. */
  public static Task create(Project project, boolean includeDependencyLicenses) {

    List<Task> precommitTasks = [
        configureForbiddenApis(project),
        project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
        project.tasks.create('jarHell', JarHellTask.class)]

    // tasks with just tests don't need dependency licenses, so this flag makes adding
    // the task optional
    if (includeDependencyLicenses) {
      DependencyLicensesTask dependencyLicenses = project.tasks.create('dependencyLicenses', DependencyLicensesTask.class)
      precommitTasks.add(dependencyLicenses)
      // we also create the updateShas helper task that is associated with dependencyLicenses
      UpdateShasTask updateShas = project.tasks.create('updateShas', UpdateShasTask.class)
      updateShas.parentTask = dependencyLicenses
    }

    Map<String, Object> precommitOptions = [
        name: 'precommit',
        group: JavaBasePlugin.VERIFICATION_GROUP,
        description: 'Runs all non-test checks.',
        dependsOn: precommitTasks
    ]
    return project.tasks.create(precommitOptions)
  }

  private static Task configureForbiddenApis(Project project) {
    project.pluginManager.apply(ForbiddenApisPlugin.class)
    project.forbiddenApis {
      internalRuntimeForbidden = true
      failOnUnsupportedJava = false
@ -75,72 +67,18 @@ class PrecommitTasks {
    Task mainForbidden = project.tasks.findByName('forbiddenApisMain')
    if (mainForbidden != null) {
      mainForbidden.configure {
        bundledSignatures += 'jdk-system-out'
        signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt')
      }
    }
    Task testForbidden = project.tasks.findByName('forbiddenApisTest')
    if (testForbidden != null) {
      testForbidden.configure {
        signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt')
      }
    }
    Task forbiddenApis = project.tasks.findByName('forbiddenApis')
    forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
    return forbiddenApis
  }
static Task configureForbiddenPatterns(TaskContainer tasks) {
Map options = [
name: 'forbiddenPatterns',
type: ForbiddenPatternsTask,
description: 'Checks source files for invalid patterns like nocommits or tabs',
]
return tasks.create(options) {
rule name: 'nocommit', pattern: /nocommit/
rule name: 'tab', pattern: /\t/
}
}
/**
* Adds a task to run jar hell before on the test classpath.
*
* We use a simple "marker" file that we touch when the task succeeds
* as the task output. This is compared against the modified time of the
* inputs (ie the jars/class files).
*/
static Task configureJarHell(Project project) {
File successMarker = new File(project.buildDir, 'markers/jarHell')
Exec task = project.tasks.create(name: 'jarHell', type: Exec)
FileCollection testClasspath = project.sourceSets.test.runtimeClasspath
task.dependsOn(testClasspath)
task.inputs.files(testClasspath)
task.outputs.file(successMarker)
task.executable = new File(project.javaHome, 'bin/java')
task.doFirst({
/* JarHell doesn't like getting directories that don't exist but
   gradle isn't especially careful about that. So we have to
   filter it ourselves. */
def taskClasspath = testClasspath.filter { it.exists() }
task.args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell')
})
if (task.logger.isInfoEnabled() == false) {
task.standardOutput = new ByteArrayOutputStream()
task.errorOutput = task.standardOutput
task.ignoreExitValue = true
task.doLast({
if (execResult.exitValue != 0) {
logger.error(standardOutput.toString())
throw new GradleException("JarHell failed")
}
})
}
task.doLast({
successMarker.parentFile.mkdirs()
successMarker.setText("", 'UTF-8')
})
return task
}
}

UpdateShasTask.groovy (new file)

@ -0,0 +1,66 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit
import org.gradle.api.DefaultTask
import org.gradle.api.tasks.TaskAction
import java.nio.file.Files
import java.security.MessageDigest
/**
* A task to update shas used by {@code DependencyLicensesCheck}
*/
public class UpdateShasTask extends DefaultTask {
/** The parent dependency licenses task to use configuration from */
public DependencyLicensesTask parentTask
public UpdateShasTask() {
description = 'Updates the sha files for the dependencyLicenses check'
onlyIf { parentTask.licensesDir.exists() }
}
@TaskAction
public void updateShas() {
Set<File> shaFiles = new HashSet<File>()
parentTask.licensesDir.eachFile {
String name = it.getName()
if (name.endsWith(DependencyLicensesTask.SHA_EXTENSION)) {
shaFiles.add(it)
}
}
for (File dependency : parentTask.dependencies) {
String jarName = dependency.getName()
File shaFile = new File(parentTask.licensesDir, jarName + DependencyLicensesTask.SHA_EXTENSION)
if (shaFile.exists() == false) {
logger.lifecycle("Adding sha for ${jarName}")
String sha = MessageDigest.getInstance("SHA-1").digest(dependency.getBytes()).encodeHex().toString()
shaFile.setText(sha, 'UTF-8')
} else {
shaFiles.remove(shaFile)
}
}
shaFiles.each { shaFile ->
logger.lifecycle("Removing unused sha ${shaFile.getName()}")
Files.delete(shaFile.toPath())
}
}
}

ClusterConfiguration.groovy

@ -27,7 +27,7 @@ import org.gradle.api.tasks.Input
class ClusterConfiguration {

  @Input
  String distribution = 'integ-test-zip'

  @Input
  int numNodes = 1
@ -71,6 +71,8 @@ class ClusterConfiguration {
  LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()

  List<Project> modules = new ArrayList<>()

  LinkedHashMap<String, Object[]> setupCommands = new LinkedHashMap<>()

  @Input
@ -93,6 +95,12 @@ class ClusterConfiguration {
    plugins.put(name, pluginProject)
  }

  /** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */
  @Input
  void module(Project moduleProject) {
    modules.add(moduleProject)
  }

  @Input
  void setupCommand(String name, Object... args) {
    setupCommands.put(name, args)
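
For orientation, a sketch of how this configuration gets driven (the values below are
hypothetical; the `integTest.clusterConfig` wiring mirrors what PluginBuildPlugin does
earlier in this commit):

  // Illustrative only: configure the cluster used by the integTest task.
  project.integTest.clusterConfig.distribution = 'integ-test-zip'
  project.integTest.clusterConfig.numNodes = 2
  // modules may only be installed into the integ-test-zip distribution
  project.integTest.clusterConfig.module(project.project(':modules:some-module'))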

ClusterFormationTasks.groovy

@ -27,9 +27,7 @@ import org.gradle.api.*
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import org.gradle.api.logging.Logger
import org.gradle.api.tasks.*

import java.nio.file.Paths
@ -62,7 +60,12 @@ class ClusterFormationTasks {
  /** Adds a dependency on the given distribution */
  static void configureDistributionDependency(Project project, String distro) {
    String elasticsearchVersion = VersionProperties.elasticsearch
    String packaging = distro
    if (distro == 'tar') {
      packaging = 'tar.gz'
    } else if (distro == 'integ-test-zip') {
      packaging = 'zip'
    }
    project.configurations {
      elasticsearchDistro
    }
@ -105,6 +108,12 @@ class ClusterFormationTasks {
    setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
    setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)

    // install modules
    for (Project module : node.config.modules) {
      String actionName = pluginTaskName('install', module.name, 'Module')
      setup = configureInstallModuleTask(taskName(task, node, actionName), project, setup, node, module)
    }

    // install plugins
    for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
      String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
@ -132,8 +141,15 @@ class ClusterFormationTasks {
  /** Adds a task to extract the elasticsearch distribution */
  static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node) {
    List extractDependsOn = [project.configurations.elasticsearchDistro, setup]
    /* project.configurations.elasticsearchDistro.singleFile will be an
       external artifact if this is being run by a plugin not living in the
       elasticsearch source tree. If this is a plugin built in the
       elasticsearch source tree or this is a distro in the elasticsearch
       source tree then this should be the version of elasticsearch built
       by the source tree. If it isn't then Bad Things(TM) will happen. */
    Task extract
    switch (node.config.distribution) {
      case 'integ-test-zip':
      case 'zip':
        extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) {
          from { project.zipTree(project.configurations.elasticsearchDistro.singleFile) }
@ -148,6 +164,33 @@ class ClusterFormationTasks {
          into node.baseDir
        }
        break;
case 'rpm':
File rpmDatabase = new File(node.baseDir, 'rpm-database')
File rpmExtracted = new File(node.baseDir, 'rpm-extracted')
/* Delay reading the location of the rpm file until task execution */
Object rpm = "${ -> project.configurations.elasticsearchDistro.singleFile}"
extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
commandLine 'rpm', '--badreloc', '--nodeps', '--noscripts', '--notriggers',
'--dbpath', rpmDatabase,
'--relocate', "/=${rpmExtracted}",
'-i', rpm
doFirst {
rpmDatabase.deleteDir()
rpmExtracted.deleteDir()
}
}
break;
case 'deb':
/* Delay reading the location of the deb file until task execution */
File debExtracted = new File(node.baseDir, 'deb-extracted')
Object deb = "${ -> project.configurations.elasticsearchDistro.singleFile}"
extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
commandLine 'dpkg-deb', '-x', deb, debExtracted
doFirst {
debExtracted.deleteDir()
}
}
break;
      default:
        throw new InvalidUserDataException("Unknown distribution: ${node.config.distribution}")
    }
@ -172,7 +215,7 @@ class ClusterFormationTasks {
    Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
    writeConfig.doFirst {
      File configFile = new File(node.confDir, 'elasticsearch.yml')
      logger.info("Configuring ${configFile}")
      configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8')
    }
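
As a tiny illustration of the collect/join above (the settings are hypothetical), a
config map is rendered into elasticsearch.yml as one `key: value` line per entry:

  // Illustrative only: example input and the text it would produce.
  Map<String, Object> esConfig = ['cluster.name': 'test-cluster', 'node.name': 'node-0']
  assert esConfig.collect { key, value -> "${key}: ${value}" }.join('\n') ==
      'cluster.name: test-cluster\nnode.name: node-0'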
@ -185,7 +228,8 @@ class ClusterFormationTasks {
    Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup)
    copyConfig.into(new File(node.homeDir, 'config')) // copy must always have a general dest dir, even though we don't use it
    for (Map.Entry<String,Object> extraConfigFile : node.config.extraConfigFiles.entrySet()) {
      copyConfig.doFirst {
        // make sure the copy won't be a no-op or act on a directory
        File srcConfigFile = project.file(extraConfigFile.getValue())
        if (srcConfigFile.isDirectory()) {
          throw new GradleException("Source for extraConfigFile must be a file: ${srcConfigFile}")
@ -193,11 +237,10 @@ class ClusterFormationTasks {
        if (srcConfigFile.exists() == false) {
          throw new GradleException("Source file for extraConfigFile does not exist: ${srcConfigFile}")
        }
      }
      File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
      copyConfig.into(destConfigFile.canonicalFile.parentFile)
                .from({ extraConfigFile.getValue() }) // wrap in closure to delay resolution to execution time
                .rename { destConfigFile.name }
    }
    return copyConfig
@ -255,6 +298,20 @@ class ClusterFormationTasks {
    return copyPlugins
  }
static Task configureInstallModuleTask(String name, Project project, Task setup, NodeInfo node, Project module) {
if (node.config.distribution != 'integ-test-zip') {
throw new GradleException("Module ${module.path} not allowed be installed distributions other than integ-test-zip because they should already have all modules bundled!")
}
if (module.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task ${name} cannot include module ${module.path} which is not an esplugin")
}
Copy installModule = project.tasks.create(name, Copy.class)
installModule.dependsOn(setup)
installModule.into(new File(node.homeDir, "modules/${module.name}"))
installModule.from({ project.zipTree(module.tasks.bundlePlugin.outputs.files.singleFile) })
return installModule
}
  static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Object plugin) {
    FileCollection pluginZip
    if (plugin instanceof Project) {
@ -284,18 +341,27 @@ class ClusterFormationTasks {
  /** Adds a task to start an elasticsearch node with the given configuration */
  static Task configureStartTask(String name, Project project, Task setup, NodeInfo node) {

    // this closure is converted into ant nodes by groovy's AntBuilder
    Closure antRunner = { AntBuilder ant ->
      ant.exec(executable: node.executable, spawn: node.config.daemonize, dir: node.cwd, taskname: 'elasticsearch') {
        node.env.each { key, value -> env(key: key, value: value) }
        node.args.each { arg(value: it) }
      }
    }

    // this closure is the actual code to run elasticsearch
    Closure elasticsearchRunner = {
      // Due to how ant exec works with the spawn option, we lose all stdout/stderr from the
      // process executed. To work around this, when spawning, we wrap the elasticsearch start
      // command inside another shell script, which simply internally redirects the output
      // of the real elasticsearch script. This allows ant to keep the streams open with the
      // dummy process, but us to have the output available if there is an error in the
      // elasticsearch start script
      if (node.config.daemonize) {
        node.writeWrapperScript()
      }

      // we must add debug options inside the closure so the config is read at execution time, as
      // gradle task options are not processed until the end of the configuration phase
      if (node.config.debug) {
@ -303,37 +369,6 @@ class ClusterFormationTasks {
        node.env['JAVA_OPTS'] = '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000'
      }
// Due to how ant exec works with the spawn option, we lose all stdout/stderr from the
// process executed. To work around this, when spawning, we wrap the elasticsearch start
// command inside another shell script, which simply internally redirects the output
// of the real elasticsearch script. This allows ant to keep the streams open with the
// dummy process, but us to have the output available if there is an error in the
// elasticsearch start script
String script = node.esScript
if (node.config.daemonize) {
String scriptName = 'run'
String argsPasser = '"$@"'
String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
scriptName += '.bat'
argsPasser = '%*'
exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
}
File wrapperScript = new File(node.cwd, scriptName)
wrapperScript.setText("\"${script}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
script = wrapperScript.toString()
}
ant.exec(executable: executable, spawn: node.config.daemonize, dir: node.cwd, taskname: 'elasticsearch') {
node.env.each { key, value -> env(key: key, value: value) }
arg(value: script)
node.args.each { arg(value: it) }
}
}
// this closure is the actual code to run elasticsearch
Closure elasticsearchRunner = {
      node.getCommandString().eachLine { line -> logger.info(line) }

      if (logger.isInfoEnabled() || node.config.daemonize == false) {
@ -405,14 +440,19 @@ class ClusterFormationTasks {
          // We already log the command at info level. No need to do it twice.
          node.getCommandString().eachLine { line -> logger.error(line) }
        }
        logger.error("Node ${node.nodeNum} output:")
        logger.error("|-----------------------------------------")
        logger.error("| failure marker exists: ${node.failedMarker.exists()}")
        logger.error("| pid file exists: ${node.pidFile.exists()}")
        // the waitfor failed, so dump any output we got (if info logging this goes directly to stdout)
        logger.error("|\n| [ant output]")
        node.buffer.toString('UTF-8').eachLine { line -> logger.error("| ${line}") }
        // also dump the log file for the startup script (which will include ES logging output to stdout)
        if (node.startLog.exists()) {
          logger.error("|\n| [log]")
          node.startLog.eachLine { line -> logger.error("| ${line}") }
        }
        logger.error("|-----------------------------------------")
      }
      throw new GradleException(msg)
    }

NodeInfo.groovy

@ -18,6 +18,7 @@
 */
package org.elasticsearch.gradle.test

import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
@ -45,6 +46,12 @@ class NodeInfo {
  /** elasticsearch home dir */
  File homeDir

  /** config directory */
  File confDir

  /** THE config file */
  File configFile

  /** working directory for the node process */
  File cwd
@ -63,8 +70,14 @@ class NodeInfo {
  /** arguments to start the node with */
  List<String> args

  /** Executable to run the bin/elasticsearch with, either cmd or sh */
  String executable

  /** Path to the elasticsearch start script */
  File esScript

  /** script to run when running in the background */
  File wrapperScript

  /** buffer for ant output when starting this node */
  ByteArrayOutputStream buffer = new ByteArrayOutputStream()
@ -77,34 +90,75 @@ class NodeInfo {
    baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
    pidFile = new File(baseDir, 'es.pid')
    homeDir = homeDir(baseDir, config.distribution)
    confDir = confDir(baseDir, config.distribution)
    configFile = new File(confDir, 'elasticsearch.yml')
    cwd = new File(baseDir, "cwd")
    failedMarker = new File(cwd, 'run.failed')
    startLog = new File(cwd, 'run.log')
    pluginsTmpDir = new File(baseDir, "plugins tmp")

    args = []
    if (Os.isFamily(Os.FAMILY_WINDOWS)) {
      executable = 'cmd'
      args.add('/C')
      args.add('"') // quote the entire command
      wrapperScript = new File(cwd, "run.bat")
      esScript = new File(homeDir, 'bin/elasticsearch.bat')
    } else {
      executable = 'sh'
      wrapperScript = new File(cwd, "run")
      esScript = new File(homeDir, 'bin/elasticsearch')
    }
    if (config.daemonize) {
      args.add("${wrapperScript}")
    } else {
      args.add("${esScript}")
    }

    env = [
      'JAVA_HOME' : project.javaHome,
      'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
    ]
    args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
    for (Map.Entry<String, String> property : System.properties.entrySet()) {
      if (property.getKey().startsWith('es.')) {
        args.add("-D${property.getKey()}=${property.getValue()}")
      }
    }
    args.add("-Des.path.conf=${confDir}")
    if (Os.isFamily(Os.FAMILY_WINDOWS)) {
      args.add('"') // end the entire command, quoted
    }
  }
/** Returns debug string for the command that started this node. */ /** Returns debug string for the command that started this node. */
String getCommandString() { String getCommandString() {
String esCommandString = "Elasticsearch node ${nodeNum} command: ${esScript} " String esCommandString = "\nNode ${nodeNum} configuration:\n"
esCommandString += args.join(' ') esCommandString += "|-----------------------------------------\n"
esCommandString += '\nenvironment:' esCommandString += "| cwd: ${cwd}\n"
env.each { k, v -> esCommandString += "\n ${k}: ${v}" } esCommandString += "| command: ${executable} ${args.join(' ')}\n"
esCommandString += '| environment:\n'
env.each { k, v -> esCommandString += "| ${k}: ${v}\n" }
if (config.daemonize) {
esCommandString += "|\n| [${wrapperScript.name}]\n"
wrapperScript.eachLine('UTF-8', { line -> esCommandString += " ${line}\n"})
}
esCommandString += '|\n| [elasticsearch.yml]\n'
configFile.eachLine('UTF-8', { line -> esCommandString += "| ${line}\n" })
esCommandString += "|-----------------------------------------"
return esCommandString return esCommandString
} }
void writeWrapperScript() {
String argsPasser = '"$@"'
String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
argsPasser = '%*'
exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
}
wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
}
/** Returns the http port for this node */ /** Returns the http port for this node */
int httpPort() { int httpPort() {
return config.baseHttpPort + nodeNum return config.baseHttpPort + nodeNum
@ -119,13 +173,32 @@ class NodeInfo {
static File homeDir(File baseDir, String distro) { static File homeDir(File baseDir, String distro) {
String path String path
switch (distro) { switch (distro) {
case 'integ-test-zip':
case 'zip': case 'zip':
case 'tar': case 'tar':
path = "elasticsearch-${VersionProperties.elasticsearch}" path = "elasticsearch-${VersionProperties.elasticsearch}"
break; break
case 'rpm':
case 'deb':
path = "${distro}-extracted/usr/share/elasticsearch"
break
default: default:
throw new InvalidUserDataException("Unknown distribution: ${distro}") throw new InvalidUserDataException("Unknown distribution: ${distro}")
} }
return new File(baseDir, path) return new File(baseDir, path)
} }
static File confDir(File baseDir, String distro) {
switch (distro) {
case 'integ-test-zip':
case 'zip':
case 'tar':
return new File(homeDir(baseDir, distro), 'config')
case 'rpm':
case 'deb':
return new File(baseDir, "${distro}-extracted/etc/elasticsearch")
default:
throw new InvalidUserDataException("Unkown distribution: ${distro}")
}
}
} }

View File

@ -31,54 +31,38 @@ import org.gradle.util.ConfigureUtil
* Runs integration tests, but first starts an ES cluster, * Runs integration tests, but first starts an ES cluster,
* and passes the ES cluster info as parameters to the tests. * and passes the ES cluster info as parameters to the tests.
*/ */
class RestIntegTestTask extends RandomizedTestingTask { public class RestIntegTestTask extends RandomizedTestingTask {
ClusterConfiguration clusterConfig = new ClusterConfiguration() ClusterConfiguration clusterConfig = new ClusterConfiguration()
/** Flag indicating whether the rest tests in the rest spec should be run. */
@Input @Input
boolean includePackaged = false boolean includePackaged = false
static RestIntegTestTask configure(Project project) { public RestIntegTestTask() {
Map integTestOptions = [ description = 'Runs rest tests against an elasticsearch cluster.'
name: 'integTest', group = JavaBasePlugin.VERIFICATION_GROUP
type: RestIntegTestTask, dependsOn(project.testClasses)
dependsOn: 'testClasses', classpath = project.sourceSets.test.runtimeClasspath
group: JavaBasePlugin.VERIFICATION_GROUP, testClassesDir = project.sourceSets.test.output.classesDir
description: 'Runs rest tests against an elasticsearch cluster.'
] // start with the common test configuration
RestIntegTestTask integTest = project.tasks.create(integTestOptions) configure(BuildPlugin.commonTestConfig(project))
integTest.configure(BuildPlugin.commonTestConfig(project)) // override/add more for rest tests
integTest.configure { parallelism = '1'
include '**/*IT.class' include('**/*IT.class')
systemProperty 'tests.rest.load_packaged', 'false' systemProperty('tests.rest.load_packaged', 'false')
}
RandomizedTestingTask test = project.tasks.findByName('test') // copy the rest spec/tests into the test resources
if (test != null) {
integTest.classpath = test.classpath
integTest.testClassesDir = test.testClassesDir
integTest.mustRunAfter(test)
}
project.check.dependsOn(integTest)
RestSpecHack.configureDependencies(project) RestSpecHack.configureDependencies(project)
project.afterEvaluate { project.afterEvaluate {
integTest.dependsOn(RestSpecHack.configureTask(project, integTest.includePackaged)) dependsOn(RestSpecHack.configureTask(project, includePackaged))
systemProperty('tests.cluster', "localhost:${clusterConfig.baseTransportPort}")
} }
return integTest
}
RestIntegTestTask() {
// this must run after all projects have been configured, so we know any project // this must run after all projects have been configured, so we know any project
// references can be accessed as a fully configured // references can be accessed as a fully configured
project.gradle.projectsEvaluated { project.gradle.projectsEvaluated {
Task test = project.tasks.findByName('test')
if (test != null) {
mustRunAfter(test)
}
ClusterFormationTasks.setup(project, this, clusterConfig) ClusterFormationTasks.setup(project, this, clusterConfig)
configure {
parallelism '1'
systemProperty 'tests.cluster', "localhost:${clusterConfig.baseTransportPort}"
}
} }
} }
@ -91,11 +75,11 @@ class RestIntegTestTask extends RandomizedTestingTask {
} }
@Input @Input
void cluster(Closure closure) { public void cluster(Closure closure) {
ConfigureUtil.configure(closure, clusterConfig) ConfigureUtil.configure(closure, clusterConfig)
} }
ClusterConfiguration getCluster() { public ClusterConfiguration getCluster() {
return clusterConfig return clusterConfig
} }
} }

View File

@ -28,12 +28,12 @@ import org.gradle.api.tasks.Copy
* currently must be available on the local filesystem. This class encapsulates * currently must be available on the local filesystem. This class encapsulates
* setting up tasks to copy the rest spec api to test resources. * setting up tasks to copy the rest spec api to test resources.
*/ */
class RestSpecHack { public class RestSpecHack {
/** /**
* Sets dependencies needed to copy the rest spec. * Sets dependencies needed to copy the rest spec.
* @param project The project to add rest spec dependency to * @param project The project to add rest spec dependency to
*/ */
static void configureDependencies(Project project) { public static void configureDependencies(Project project) {
project.configurations { project.configurations {
restSpec restSpec
} }
@ -48,7 +48,7 @@ class RestSpecHack {
* @param project The project to add the copy task to * @param project The project to add the copy task to
* @param includePackagedTests true if the packaged tests should be copied, false otherwise * @param includePackagedTests true if the packaged tests should be copied, false otherwise
*/ */
static Task configureTask(Project project, boolean includePackagedTests) { public static Task configureTask(Project project, boolean includePackagedTests) {
Map copyRestSpecProps = [ Map copyRestSpecProps = [
name : 'copyRestSpec', name : 'copyRestSpec',
type : Copy, type : Copy,
@ -65,7 +65,6 @@ class RestSpecHack {
project.idea { project.idea {
module { module {
if (scopes.TEST != null) { if (scopes.TEST != null) {
// TODO: need to add the TEST scope somehow for rest test plugin...
scopes.TEST.plus.add(project.configurations.restSpec) scopes.TEST.plus.add(project.configurations.restSpec)
} }
} }

View File

@ -18,22 +18,19 @@
*/ */
package org.elasticsearch.gradle.test package org.elasticsearch.gradle.test
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.gradle.api.Plugin import org.gradle.api.Plugin
import org.gradle.api.Project import org.gradle.api.Project
/** Configures the build to have a rest integration test. */ /** A plugin to add rest integration tests. Used for qa projects. */
class RestTestPlugin implements Plugin<Project> { public class RestTestPlugin implements Plugin<Project> {
@Override @Override
void apply(Project project) { public void apply(Project project) {
project.pluginManager.apply(StandaloneTestBasePlugin) project.pluginManager.apply(StandaloneTestBasePlugin)
RandomizedTestingTask integTest = RestIntegTestTask.configure(project) RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
RestSpecHack.configureDependencies(project) integTest.cluster.distribution = 'zip' // rest tests should run with the real zip
integTest.configure { integTest.mustRunAfter(project.precommit)
classpath = project.sourceSets.test.runtimeClasspath project.check.dependsOn(integTest)
testClassesDir project.sourceSets.test.output.classesDir
}
} }
} }

View File

@ -2,13 +2,17 @@ package org.elasticsearch.gradle.test
import org.gradle.api.DefaultTask import org.gradle.api.DefaultTask
import org.gradle.api.Project import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option import org.gradle.api.internal.tasks.options.Option
import org.gradle.util.ConfigureUtil
class RunTask extends DefaultTask { public class RunTask extends DefaultTask {
ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false) ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false)
RunTask() { public RunTask() {
description = "Runs elasticsearch with '${project.path}'"
group = 'Verification'
project.afterEvaluate { project.afterEvaluate {
ClusterFormationTasks.setup(project, this, clusterConfig) ClusterFormationTasks.setup(project, this, clusterConfig)
} }
@ -22,11 +26,10 @@ class RunTask extends DefaultTask {
clusterConfig.debug = enabled; clusterConfig.debug = enabled;
} }
static void configure(Project project) { /** Configure the cluster that will be run. */
RunTask task = project.tasks.create( @Override
name: 'run', public Task configure(Closure closure) {
type: RunTask, ConfigureUtil.configure(closure, clusterConfig)
description: "Runs elasticsearch with '${project.path}'", return this
group: 'Verification')
} }
} }

View File

@ -27,12 +27,13 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.Plugin import org.gradle.api.Plugin
import org.gradle.api.Project import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.plugins.ide.eclipse.model.EclipseClasspath
/** Configures the build to have a rest integration test. */ /** Configures the build to have a rest integration test. */
class StandaloneTestBasePlugin implements Plugin<Project> { public class StandaloneTestBasePlugin implements Plugin<Project> {
@Override @Override
void apply(Project project) { public void apply(Project project) {
project.pluginManager.apply(JavaBasePlugin) project.pluginManager.apply(JavaBasePlugin)
project.pluginManager.apply(RandomizedTestingPlugin) project.pluginManager.apply(RandomizedTestingPlugin)
@ -40,25 +41,15 @@ class StandaloneTestBasePlugin implements Plugin<Project> {
BuildPlugin.configureRepositories(project) BuildPlugin.configureRepositories(project)
// only setup tests to build // only setup tests to build
project.sourceSets { project.sourceSets.create('test')
test project.dependencies.add('testCompile', "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}")
}
project.dependencies {
testCompile "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}"
}
project.eclipse { project.eclipse.classpath.sourceSets = [project.sourceSets.test]
classpath { project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]
sourceSets = [project.sourceSets.test] project.idea.module.testSourceDirs += project.sourceSets.test.java.srcDirs
plusConfigurations = [project.configurations.testRuntime] project.idea.module.scopes['TEST'] = [plus: [project.configurations.testRuntime]]
}
} PrecommitTasks.create(project, false)
project.idea { project.check.dependsOn(project.precommit)
module {
testSourceDirs += project.sourceSets.test.java.srcDirs
scopes['TEST'] = [plus: [project.configurations.testRuntime]]
}
}
PrecommitTasks.configure(project)
} }
} }

View File

@ -25,11 +25,11 @@ import org.gradle.api.Plugin
import org.gradle.api.Project import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.JavaBasePlugin
/** Configures the build to have only unit tests. */ /** A plugin to add tests only. Used for QA tests that run arbitrary unit tests. */
class StandaloneTestPlugin implements Plugin<Project> { public class StandaloneTestPlugin implements Plugin<Project> {
@Override @Override
void apply(Project project) { public void apply(Project project) {
project.pluginManager.apply(StandaloneTestBasePlugin) project.pluginManager.apply(StandaloneTestBasePlugin)
Map testOptions = [ Map testOptions = [
@ -41,10 +41,9 @@ class StandaloneTestPlugin implements Plugin<Project> {
] ]
RandomizedTestingTask test = project.tasks.create(testOptions) RandomizedTestingTask test = project.tasks.create(testOptions)
test.configure(BuildPlugin.commonTestConfig(project)) test.configure(BuildPlugin.commonTestConfig(project))
test.configure { test.classpath = project.sourceSets.test.runtimeClasspath
classpath = project.sourceSets.test.runtimeClasspath test.testClassesDir project.sourceSets.test.output.classesDir
testClassesDir project.sourceSets.test.output.classesDir test.mustRunAfter(project.precommit)
}
project.check.dependsOn(test) project.check.dependsOn(test)
} }
} }

View File

@ -112,3 +112,7 @@ java.lang.System#setProperty(java.lang.String,java.lang.String)
java.lang.System#clearProperty(java.lang.String) java.lang.System#clearProperty(java.lang.String)
java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a read-only view java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a read-only view
@defaultMessage Avoid unchecked warnings by using Collections#empty(List|Map|Set) methods
java.util.Collections#EMPTY_LIST
java.util.Collections#EMPTY_MAP
java.util.Collections#EMPTY_SET
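The three constants above are banned because they are raw types; the forbidden-apis message points to the typed Collections#empty* factory methods instead. A minimal sketch of the preferred form (the class and field names here are illustrative only):
---------------------------------------------------------------------------
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;

class EmptyCollectionsExample {
    // Collections.EMPTY_LIST/EMPTY_MAP/EMPTY_SET are raw types and force an
    // unchecked assignment; the typed factory methods infer the element type.
    List<String> names = Collections.emptyList();
    Map<String, Integer> counts = Collections.emptyMap();
    Set<Long> ids = Collections.emptySet();
}
---------------------------------------------------------------------------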

View File

@ -90,3 +90,12 @@ org.elasticsearch.common.io.PathUtils#get(java.net.URI)
@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead @defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
org.apache.lucene.search.Query#setBoost(float) org.apache.lucene.search.Query#setBoost(float)
@defaultMessage Constructing a DateTime without a time zone is dangerous
org.joda.time.DateTime#<init>()
org.joda.time.DateTime#<init>(long)
org.joda.time.DateTime#<init>(int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
org.joda.time.DateTime#now()
org.joda.time.DateTimeZone#getDefault()
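These signatures ban the Joda-Time entry points that silently fall back to the JVM default time zone. A minimal sketch of the explicit-zone alternatives (the class name is illustrative):
---------------------------------------------------------------------------
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

class ExplicitTimeZoneExample {
    // DateTime.now() and new DateTime(...) without a zone pick up the JVM
    // default; passing the zone explicitly keeps results stable across nodes.
    DateTime nowUtc = DateTime.now(DateTimeZone.UTC);
    DateTime epochUtc = new DateTime(0L, DateTimeZone.UTC);
}
---------------------------------------------------------------------------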

View File

@ -1,66 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
@defaultMessage unsafe encoders/decoders have problems in the lzf compress library. Use variants of encode/decode functions which take Encoder/Decoder.
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int)
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int)
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.impl.UnsafeChunkDecoder#<init>()
com.ning.compress.lzf.parallel.CompressTask
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance()
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(com.ning.compress.BufferRecycler)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.util.ChunkDecoderFactory#optimalInstance()
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.File)
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.FileDescriptor)
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.lang.String)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File, boolean)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.FileDescriptor)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String)
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String, boolean)
com.ning.compress.lzf.LZFEncoder#encode(byte[])
com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int)
com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int)
com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFCompressingInputStream#<init>(java.io.InputStream)
com.ning.compress.lzf.LZFDecoder#fastDecoder()
com.ning.compress.lzf.LZFDecoder#decode(byte[])
com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int)
com.ning.compress.lzf.LZFDecoder#decode(byte[], byte[])
com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int, byte[])
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream)
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, boolean)
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler, boolean)
com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream)
com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream, com.ning.compress.BufferRecycler)
com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler)
com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler, com.ning.compress.BufferRecycler)
@defaultMessage Constructing a DateTime without a time zone is dangerous
org.joda.time.DateTime#<init>()
org.joda.time.DateTime#<init>(long)
org.joda.time.DateTime#<init>(int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int)
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
org.joda.time.DateTime#now()
org.joda.time.DateTimeZone#getDefault()

View File

@ -62,12 +62,9 @@ dependencies {
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}" compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}" compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
compile "org.yaml:snakeyaml:1.15" // used by jackson yaml
// network stack // network stack
compile 'io.netty:netty:3.10.5.Final' compile 'io.netty:netty:3.10.5.Final'
// compression of transport protocol
compile 'com.ning:compress-lzf:1.0.2'
// percentiles aggregation // percentiles aggregation
compile 'com.tdunning:t-digest:3.0' compile 'com.tdunning:t-digest:3.0'
  // percentile ranks aggregation  // percentile ranks aggregation
@ -117,6 +114,9 @@ forbiddenPatterns {
exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt' exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
} }
// dependency licenses are currently checked in distribution
dependencyLicenses.enabled = false
if (isEclipse == false || project.path == ":core-tests") { if (isEclipse == false || project.path == ":core-tests") {
task integTest(type: RandomizedTestingTask, task integTest(type: RandomizedTestingTask,
group: JavaBasePlugin.VERIFICATION_GROUP, group: JavaBasePlugin.VERIFICATION_GROUP,
@ -129,8 +129,4 @@ if (isEclipse == false || project.path == ":core-tests") {
} }
check.dependsOn integTest check.dependsOn integTest
integTest.mustRunAfter test integTest.mustRunAfter test
RestSpecHack.configureDependencies(project)
Task copyRestSpec = RestSpecHack.configureTask(project, true)
integTest.dependsOn copyRestSpec
} }

View File

@ -554,7 +554,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class, org.elasticsearch.transport.NodeDisconnectedException::new, 84), NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class, org.elasticsearch.transport.NodeDisconnectedException::new, 84),
ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class, org.elasticsearch.index.AlreadyExpiredException::new, 85), ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class, org.elasticsearch.index.AlreadyExpiredException::new, 85),
AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class, org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86), AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class, org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
MERGE_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.MergeMappingException.class, org.elasticsearch.index.mapper.MergeMappingException::new, 87), // 87 used to be for MergeMappingException
INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88), INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89), PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89),
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90), REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),

View File

@ -74,7 +74,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) { protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
if (request.waitForEvents() != null) { if (request.waitForEvents() != null) {
final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis(); final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis();
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) {
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
return currentState; return currentState;

View File

@ -72,14 +72,14 @@ public class NodeInfo extends BaseNodeResponse {
private HttpInfo http; private HttpInfo http;
@Nullable @Nullable
private PluginsInfo plugins; private PluginsAndModules plugins;
NodeInfo() { NodeInfo() {
} }
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings, public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool, @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsInfo plugins) { @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins) {
super(node); super(node);
this.version = version; this.version = version;
this.build = build; this.build = build;
@ -172,7 +172,7 @@ public class NodeInfo extends BaseNodeResponse {
} }
@Nullable @Nullable
public PluginsInfo getPlugins() { public PluginsAndModules getPlugins() {
return this.plugins; return this.plugins;
} }
@ -217,7 +217,8 @@ public class NodeInfo extends BaseNodeResponse {
http = HttpInfo.readHttpInfo(in); http = HttpInfo.readHttpInfo(in);
} }
if (in.readBoolean()) { if (in.readBoolean()) {
plugins = PluginsInfo.readPluginsInfo(in); plugins = new PluginsAndModules();
plugins.readFrom(in);
} }
} }

View File

@ -0,0 +1,115 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.node.info;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.plugins.PluginInfo;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Information about plugins and modules
*/
public class PluginsAndModules implements Streamable, ToXContent {
private List<PluginInfo> plugins;
private List<PluginInfo> modules;
public PluginsAndModules() {
plugins = new ArrayList<>();
modules = new ArrayList<>();
}
/**
     * Returns the list of plugins, ordered by plugin name
*/
public List<PluginInfo> getPluginInfos() {
List<PluginInfo> plugins = new ArrayList<>(this.plugins);
Collections.sort(plugins, (p1, p2) -> p1.getName().compareTo(p2.getName()));
return plugins;
}
/**
     * Returns the list of modules, ordered by module name
*/
public List<PluginInfo> getModuleInfos() {
List<PluginInfo> modules = new ArrayList<>(this.modules);
Collections.sort(modules, (p1, p2) -> p1.getName().compareTo(p2.getName()));
return modules;
}
public void addPlugin(PluginInfo info) {
plugins.add(info);
}
public void addModule(PluginInfo info) {
modules.add(info);
}
@Override
public void readFrom(StreamInput in) throws IOException {
if (plugins.isEmpty() == false || modules.isEmpty() == false) {
throw new IllegalStateException("instance is already populated");
}
int plugins_size = in.readInt();
for (int i = 0; i < plugins_size; i++) {
plugins.add(PluginInfo.readFromStream(in));
}
int modules_size = in.readInt();
for (int i = 0; i < modules_size; i++) {
modules.add(PluginInfo.readFromStream(in));
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(plugins.size());
for (PluginInfo plugin : getPluginInfos()) {
plugin.writeTo(out);
}
out.writeInt(modules.size());
for (PluginInfo module : getModuleInfos()) {
module.writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray("plugins");
for (PluginInfo pluginInfo : getPluginInfos()) {
pluginInfo.toXContent(builder, params);
}
builder.endArray();
// TODO: not ideal, make a better api for this (e.g. with jar metadata, and so on)
builder.startArray("modules");
for (PluginInfo moduleInfo : getModuleInfos()) {
moduleInfo.toXContent(builder, params);
}
builder.endArray();
return builder;
}
}
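PluginsAndModules follows the usual Streamable contract: writeTo serializes the two lists, and readFrom repopulates a fresh instance and refuses to run twice. A hedged round-trip sketch, assuming the BytesStreamOutput and StreamInput.wrap helpers behave as elsewhere in this codebase:
---------------------------------------------------------------------------
import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;

class PluginsAndModulesRoundTrip {
    static PluginsAndModules roundTrip(PluginsAndModules original) throws IOException {
        // Serialize, then rehydrate into a new (empty) instance; readFrom throws
        // IllegalStateException if the target instance is already populated.
        BytesStreamOutput out = new BytesStreamOutput();
        original.writeTo(out);
        PluginsAndModules copy = new PluginsAndModules();
        copy.readFrom(StreamInput.wrap(out.bytes()));
        return copy;
    }
}
---------------------------------------------------------------------------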

View File

@ -1,101 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.node.info;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.plugins.PluginInfo;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
public class PluginsInfo implements Streamable, ToXContent {
static final class Fields {
static final XContentBuilderString PLUGINS = new XContentBuilderString("plugins");
}
private List<PluginInfo> infos;
public PluginsInfo() {
infos = new ArrayList<>();
}
public PluginsInfo(int size) {
infos = new ArrayList<>(size);
}
/**
* @return an ordered list based on plugins name
*/
public List<PluginInfo> getInfos() {
Collections.sort(infos, new Comparator<PluginInfo>() {
@Override
public int compare(final PluginInfo o1, final PluginInfo o2) {
return o1.getName().compareTo(o2.getName());
}
});
return infos;
}
public void add(PluginInfo info) {
infos.add(info);
}
public static PluginsInfo readPluginsInfo(StreamInput in) throws IOException {
PluginsInfo infos = new PluginsInfo();
infos.readFrom(in);
return infos;
}
@Override
public void readFrom(StreamInput in) throws IOException {
int plugins_size = in.readInt();
for (int i = 0; i < plugins_size; i++) {
infos.add(PluginInfo.readFromStream(in));
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(infos.size());
for (PluginInfo plugin : getInfos()) {
plugin.writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray(Fields.PLUGINS);
for (PluginInfo pluginInfo : getInfos()) {
pluginInfo.toXContent(builder, params);
}
builder.endArray();
return builder;
}
}

View File

@ -68,7 +68,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
@Override @Override
protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) { protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) {
clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterRerouteResponse>(request, listener) { clusterService.submitStateUpdateTask("cluster_reroute (api)", new AckedClusterStateUpdateTask<ClusterRerouteResponse>(Priority.IMMEDIATE, request, listener) {
private volatile ClusterState clusterStateToSend; private volatile ClusterState clusterStateToSend;
private volatile RoutingExplanations explanations; private volatile RoutingExplanations explanations;

View File

@ -91,7 +91,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
final Settings.Builder transientUpdates = Settings.settingsBuilder(); final Settings.Builder transientUpdates = Settings.settingsBuilder();
final Settings.Builder persistentUpdates = Settings.settingsBuilder(); final Settings.Builder persistentUpdates = Settings.settingsBuilder();
clusterService.submitStateUpdateTask("cluster_update_settings", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(request, listener) { clusterService.submitStateUpdateTask("cluster_update_settings",
new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.IMMEDIATE, request, listener) {
private volatile boolean changed = false; private volatile boolean changed = false;
@ -132,7 +133,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
// in the components (e.g. FilterAllocationDecider), so the changes made by the first call aren't visible // in the components (e.g. FilterAllocationDecider), so the changes made by the first call aren't visible
// to the components until the ClusterStateListener instances have been invoked, but are visible after // to the components until the ClusterStateListener instances have been invoked, but are visible after
// the first update task has been completed. // the first update task has been completed.
clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(request, listener) { clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings",
new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.URGENT, request, listener) {
@Override @Override
public boolean mustAck(DiscoveryNode discoveryNode) { public boolean mustAck(DiscoveryNode discoveryNode) {

View File

@ -74,7 +74,7 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
versions.add(nodeResponse.nodeInfo().getVersion()); versions.add(nodeResponse.nodeInfo().getVersion());
process.addNodeStats(nodeResponse.nodeStats()); process.addNodeStats(nodeResponse.nodeStats());
jvm.addNodeInfoStats(nodeResponse.nodeInfo(), nodeResponse.nodeStats()); jvm.addNodeInfoStats(nodeResponse.nodeInfo(), nodeResponse.nodeStats());
plugins.addAll(nodeResponse.nodeInfo().getPlugins().getInfos()); plugins.addAll(nodeResponse.nodeInfo().getPlugins().getPluginInfos());
// now do the stats that should be deduped by hardware (implemented by ip deduping) // now do the stats that should be deduped by hardware (implemented by ip deduping)
TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress(); TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress();

View File

@ -46,9 +46,10 @@ import java.util.List;
import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.action.ValidateActions.addValidationError;
/** /**
 * A bulk request holds an ordered list of {@link IndexRequest}s and {@link DeleteRequest}s and allows executing * A bulk request holds an ordered list of {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s
 * them in a single batch. * and allows executing them in a single batch.
* *
 * Note that we only support refresh on the bulk request, not per item.
* @see org.elasticsearch.client.Client#bulk(BulkRequest) * @see org.elasticsearch.client.Client#bulk(BulkRequest)
*/ */
public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest { public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest {
@ -89,6 +90,12 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
return add(request, null); return add(request, null);
} }
/**
* Add a request to the current BulkRequest.
* @param request Request to add
* @param payload Optional payload
* @return the current bulk request
*/
public BulkRequest add(ActionRequest request, @Nullable Object payload) { public BulkRequest add(ActionRequest request, @Nullable Object payload) {
if (request instanceof IndexRequest) { if (request instanceof IndexRequest) {
add((IndexRequest) request, payload); add((IndexRequest) request, payload);
@ -127,7 +134,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) { BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) {
requests.add(request); requests.add(request);
addPayload(payload); addPayload(payload);
sizeInBytes += request.source().length() + REQUEST_OVERHEAD; // lack of source is validated in validate() method
sizeInBytes += (request.source() != null ? request.source().length() : 0) + REQUEST_OVERHEAD;
return this; return this;
} }
@ -292,7 +300,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
String parent = null; String parent = null;
String[] fields = defaultFields; String[] fields = defaultFields;
String timestamp = null; String timestamp = null;
Long ttl = null; TimeValue ttl = null;
String opType = null; String opType = null;
long version = Versions.MATCH_ANY; long version = Versions.MATCH_ANY;
VersionType versionType = VersionType.INTERNAL; VersionType versionType = VersionType.INTERNAL;
@ -325,9 +333,9 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
timestamp = parser.text(); timestamp = parser.text();
} else if ("_ttl".equals(currentFieldName) || "ttl".equals(currentFieldName)) { } else if ("_ttl".equals(currentFieldName) || "ttl".equals(currentFieldName)) {
if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName).millis(); ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName);
} else { } else {
ttl = parser.longValue(); ttl = new TimeValue(parser.longValue());
} }
} else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) { } else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
opType = parser.text(); opType = parser.text();
@ -478,8 +486,14 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
if (requests.isEmpty()) { if (requests.isEmpty()) {
validationException = addValidationError("no requests added", validationException); validationException = addValidationError("no requests added", validationException);
} }
for (int i = 0; i < requests.size(); i++) { for (ActionRequest request : requests) {
ActionRequestValidationException ex = requests.get(i).validate(); // We first check if refresh has been set
if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) ||
(request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) ||
(request instanceof IndexRequest && ((IndexRequest)request).refresh())) {
validationException = addValidationError("Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", validationException);
}
ActionRequestValidationException ex = request.validate();
if (ex != null) { if (ex != null) {
if (validationException == null) { if (validationException == null) {
validationException = new ActionRequestValidationException(); validationException = new ActionRequestValidationException();
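With this change, validate() rejects any item whose own refresh flag is set; callers are expected to request the refresh on the BulkRequest itself. A minimal usage sketch (the index name, type, id and source below are placeholders):
---------------------------------------------------------------------------
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;

class BulkRefreshExample {
    static BulkRequest buildBulk() {
        // Setting refresh(true) on the individual IndexRequest would now trip
        // the validation error above; set it on the BulkRequest instead.
        BulkRequest bulk = new BulkRequest();
        bulk.add(new IndexRequest("index", "type", "1").source("{\"field\":\"value\"}"));
        bulk.refresh(true);
        return bulk;
    }
}
---------------------------------------------------------------------------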

View File

@ -335,7 +335,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
indexRequest.process(clusterState.metaData(), mappingMd, allowIdGeneration, request.index()); indexRequest.process(clusterState.metaData(), mappingMd, allowIdGeneration, request.index());
} }
return executeIndexRequestOnPrimary(request, indexRequest, indexShard); return executeIndexRequestOnPrimary(indexRequest, indexShard);
} }
private WriteResult<DeleteResponse> shardDeleteOperation(BulkShardRequest request, DeleteRequest deleteRequest, IndexShard indexShard) { private WriteResult<DeleteResponse> shardDeleteOperation(BulkShardRequest request, DeleteRequest deleteRequest, IndexShard indexShard) {

View File

@ -35,6 +35,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.*; import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.VersionType; import org.elasticsearch.index.VersionType;
@ -136,7 +137,8 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
private String parent; private String parent;
@Nullable @Nullable
private String timestamp; private String timestamp;
private long ttl = -1; @Nullable
private TimeValue ttl;
private BytesReference source; private BytesReference source;
@ -229,6 +231,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
if (!versionType.validateVersionForWrites(version)) { if (!versionType.validateVersionForWrites(version)) {
validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
} }
if (ttl != null) {
if (ttl.millis() < 0) {
validationException = addValidationError("ttl must not be negative", validationException);
}
}
return validationException; return validationException;
} }
@ -324,22 +332,33 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
} }
/** /**
     * Sets the relative ttl value. It must be &gt; 0 as it makes little sense otherwise. Setting it     * Sets the ttl value as a time value expression.
* to <tt>null</tt> will reset to have no ttl.
*/ */
public IndexRequest ttl(Long ttl) throws ElasticsearchGenerationException { public IndexRequest ttl(String ttl) {
if (ttl == null) { this.ttl = TimeValue.parseTimeValue(ttl, null, "ttl");
this.ttl = -1; return this;
return this; }
}
if (ttl <= 0) { /**
throw new IllegalArgumentException("TTL value must be > 0. Illegal value provided [" + ttl + "]"); * Sets the ttl as a {@link TimeValue} instance.
} */
public IndexRequest ttl(TimeValue ttl) {
this.ttl = ttl; this.ttl = ttl;
return this; return this;
} }
public long ttl() { /**
     * Sets the relative ttl value in milliseconds. It must be greater than 0 as it makes little sense otherwise.
*/
public IndexRequest ttl(long ttl) {
this.ttl = new TimeValue(ttl);
return this;
}
/**
* Returns the ttl as a {@link TimeValue}
*/
public TimeValue ttl() {
return this.ttl; return this.ttl;
} }
@ -665,7 +684,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
routing = in.readOptionalString(); routing = in.readOptionalString();
parent = in.readOptionalString(); parent = in.readOptionalString();
timestamp = in.readOptionalString(); timestamp = in.readOptionalString();
ttl = in.readLong(); ttl = in.readBoolean() ? TimeValue.readTimeValue(in) : null;
source = in.readBytesReference(); source = in.readBytesReference();
opType = OpType.fromId(in.readByte()); opType = OpType.fromId(in.readByte());
@ -682,7 +701,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
out.writeOptionalString(routing); out.writeOptionalString(routing);
out.writeOptionalString(parent); out.writeOptionalString(parent);
out.writeOptionalString(timestamp); out.writeOptionalString(timestamp);
out.writeLong(ttl); if (ttl == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
ttl.writeTo(out);
}
out.writeBytesReference(source); out.writeBytesReference(source);
out.writeByte(opType.id()); out.writeByte(opType.id());
out.writeBoolean(refresh); out.writeBoolean(refresh);

View File

@ -23,6 +23,7 @@ import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType; import org.elasticsearch.index.VersionType;
@ -254,9 +255,27 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
return this; return this;
} }
    // Sets the relative ttl value. It must be > 0 as it makes little sense otherwise.    /**
* Sets the ttl value as a time value expression.
*/
public IndexRequestBuilder setTTL(String ttl) {
request.ttl(ttl);
return this;
}
/**
     * Sets the relative ttl value in milliseconds. It must be greater than 0 as it makes little sense otherwise.
*/
public IndexRequestBuilder setTTL(long ttl) { public IndexRequestBuilder setTTL(long ttl) {
request.ttl(ttl); request.ttl(ttl);
return this; return this;
} }
/**
* Sets the ttl as a {@link TimeValue} instance.
*/
public IndexRequestBuilder setTTL(TimeValue ttl) {
request.ttl(ttl);
return this;
}
} }
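The builder now accepts the ttl either as a time value expression, as raw milliseconds, or as a TimeValue; all three end up as the same TimeValue on the underlying IndexRequest. A hedged usage sketch (index, type and ids are placeholders, and the _ttl field must be enabled in the mapping for the value to take effect):
---------------------------------------------------------------------------
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;

class TtlUsageExample {
    static void indexWithTtl(Client client) {
        client.prepareIndex("index", "type", "1")
                .setSource("{\"field\":\"value\"}")
                .setTTL("1d")                         // time value expression
                .get();
        client.prepareIndex("index", "type", "2")
                .setSource("{\"field\":\"value\"}")
                .setTTL(TimeValue.timeValueHours(12)) // TimeValue instance
                .get();
    }
}
---------------------------------------------------------------------------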

View File

@ -166,7 +166,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()); IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardRequest.shardId.id()); IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());
final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard); final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(request, indexShard);
final IndexResponse response = result.response; final IndexResponse response = result.response;
final Translog.Location location = result.location; final Translog.Location location = result.location;

View File

@ -223,7 +223,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
throw requestBlockException; throw requestBlockException;
} }
logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version()); if (logger.isTraceEnabled()) {
logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version());
}
ShardsIterator shardIt = shards(clusterState, request, concreteIndices); ShardsIterator shardIt = shards(clusterState, request, concreteIndices);
nodeIds = new HashMap<>(); nodeIds = new HashMap<>();
@ -300,7 +302,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
} }
protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) { protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) {
logger.trace("received response for [{}] from node [{}]", actionName, node.id()); if (logger.isTraceEnabled()) {
logger.trace("received response for [{}] from node [{}]", actionName, node.id());
}
// this is defensive to protect against the possibility of double invocation // this is defensive to protect against the possibility of double invocation
// the current implementation of TransportService#sendRequest guards against this // the current implementation of TransportService#sendRequest guards against this
@ -351,7 +355,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
public void messageReceived(final NodeRequest request, TransportChannel channel) throws Exception { public void messageReceived(final NodeRequest request, TransportChannel channel) throws Exception {
List<ShardRouting> shards = request.getShards(); List<ShardRouting> shards = request.getShards();
final int totalShards = shards.size(); final int totalShards = shards.size();
logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards); if (logger.isTraceEnabled()) {
logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards);
}
final Object[] shardResultOrExceptions = new Object[totalShards]; final Object[] shardResultOrExceptions = new Object[totalShards];
int shardIndex = -1; int shardIndex = -1;
@ -375,10 +381,14 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
private void onShardOperation(final NodeRequest request, final Object[] shardResults, final int shardIndex, final ShardRouting shardRouting) { private void onShardOperation(final NodeRequest request, final Object[] shardResults, final int shardIndex, final ShardRouting shardRouting) {
try { try {
logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary()); if (logger.isTraceEnabled()) {
logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary());
}
ShardOperationResult result = shardOperation(request.indicesLevelRequest, shardRouting); ShardOperationResult result = shardOperation(request.indicesLevelRequest, shardRouting);
shardResults[shardIndex] = result; shardResults[shardIndex] = result;
logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary()); if (logger.isTraceEnabled()) {
logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary());
}
} catch (Throwable t) { } catch (Throwable t) {
BroadcastShardOperationFailedException e = new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", t); BroadcastShardOperationFailedException e = new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", t);
e.setIndex(shardRouting.getIndex()); e.setIndex(shardRouting.getIndex());

View File

@ -25,7 +25,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionWriteResponse; import org.elasticsearch.action.ActionWriteResponse;
import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.UnavailableShardsException;
import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequest.OpType; import org.elasticsearch.action.index.IndexRequest.OpType;
import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.index.IndexResponse;
@ -1074,23 +1073,22 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
/** Utility method to create either an index or a create operation depending /** Utility method to create either an index or a create operation depending
* on the {@link OpType} of the request. */ * on the {@link OpType} of the request. */
private final Engine.Index prepareIndexOperationOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) { private Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) {
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).index(request.index()).type(request.type()).id(request.id()) SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).index(request.index()).type(request.type()).id(request.id())
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
return indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY); return indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
} }
/** Execute the given {@link IndexRequest} on a primary shard, throwing a /** Execute the given {@link IndexRequest} on a primary shard, throwing a
* {@link RetryOnPrimaryException} if the operation needs to be re-tried. */ * {@link RetryOnPrimaryException} if the operation needs to be re-tried. */
protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable { protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard) throws Throwable {
Engine.Index operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard); Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = indexShard.shardId(); final ShardId shardId = indexShard.shardId();
if (update != null) { if (update != null) {
final String indexName = shardId.getIndex(); final String indexName = shardId.getIndex();
mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update); mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard); operation = prepareIndexOperationOnPrimary(request, indexShard);
update = operation.parsedDoc().dynamicMappingsUpdate(); update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) { if (update != null) {
throw new RetryOnPrimaryException(shardId, throw new RetryOnPrimaryException(shardId,

View File

@ -88,7 +88,7 @@ public class UpdateHelper extends AbstractComponent {
throw new DocumentMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id()); throw new DocumentMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id());
} }
IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest(); IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest();
Long ttl = indexRequest.ttl(); TimeValue ttl = indexRequest.ttl();
if (request.scriptedUpsert() && request.script() != null) { if (request.scriptedUpsert() && request.script() != null) {
// Run the script to perform the create logic // Run the script to perform the create logic
IndexRequest upsert = request.upsertRequest(); IndexRequest upsert = request.upsertRequest();
@ -99,7 +99,7 @@ public class UpdateHelper extends AbstractComponent {
ctx.put("_source", upsertDoc); ctx.put("_source", upsertDoc);
ctx = executeScript(request, ctx); ctx = executeScript(request, ctx);
//Allow the script to set TTL using ctx._ttl //Allow the script to set TTL using ctx._ttl
if (ttl < 0) { if (ttl == null) {
ttl = getTTLFromScriptContext(ctx); ttl = getTTLFromScriptContext(ctx);
} }
@ -124,7 +124,7 @@ public class UpdateHelper extends AbstractComponent {
indexRequest.index(request.index()).type(request.type()).id(request.id()) indexRequest.index(request.index()).type(request.type()).id(request.id())
// it has to be a "create!" // it has to be a "create!"
.create(true) .create(true)
.ttl(ttl == null || ttl < 0 ? null : ttl) .ttl(ttl)
.refresh(request.refresh()) .refresh(request.refresh())
.routing(request.routing()) .routing(request.routing())
.parent(request.parent()) .parent(request.parent())
@ -151,7 +151,7 @@ public class UpdateHelper extends AbstractComponent {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true); Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
String operation = null; String operation = null;
String timestamp = null; String timestamp = null;
Long ttl = null; TimeValue ttl = null;
final Map<String, Object> updatedSourceAsMap; final Map<String, Object> updatedSourceAsMap;
final XContentType updateSourceContentType = sourceAndContent.v1(); final XContentType updateSourceContentType = sourceAndContent.v1();
String routing = getResult.getFields().containsKey(RoutingFieldMapper.NAME) ? getResult.field(RoutingFieldMapper.NAME).getValue().toString() : null; String routing = getResult.getFields().containsKey(RoutingFieldMapper.NAME) ? getResult.field(RoutingFieldMapper.NAME).getValue().toString() : null;
@ -160,7 +160,7 @@ public class UpdateHelper extends AbstractComponent {
if (request.script() == null && request.doc() != null) { if (request.script() == null && request.doc() != null) {
IndexRequest indexRequest = request.doc(); IndexRequest indexRequest = request.doc();
updatedSourceAsMap = sourceAndContent.v2(); updatedSourceAsMap = sourceAndContent.v2();
if (indexRequest.ttl() > 0) { if (indexRequest.ttl() != null) {
ttl = indexRequest.ttl(); ttl = indexRequest.ttl();
} }
timestamp = indexRequest.timestamp(); timestamp = indexRequest.timestamp();
@ -211,9 +211,9 @@ public class UpdateHelper extends AbstractComponent {
// apply script to update the source // apply script to update the source
// No TTL has been given in the update script so we keep previous TTL value if there is one // No TTL has been given in the update script so we keep previous TTL value if there is one
if (ttl == null) { if (ttl == null) {
ttl = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null; Long ttlAsLong = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
if (ttl != null) { if (ttlAsLong != null) {
ttl = ttl - TimeValue.nsecToMSec(System.nanoTime() - getDateNS); // It is an approximation of exact TTL value, could be improved ttl = new TimeValue(ttlAsLong - TimeValue.nsecToMSec(System.nanoTime() - getDateNS));// It is an approximation of exact TTL value, could be improved
} }
} }
@ -256,17 +256,15 @@ public class UpdateHelper extends AbstractComponent {
return ctx; return ctx;
} }
private Long getTTLFromScriptContext(Map<String, Object> ctx) { private TimeValue getTTLFromScriptContext(Map<String, Object> ctx) {
Long ttl = null;
Object fetchedTTL = ctx.get("_ttl"); Object fetchedTTL = ctx.get("_ttl");
if (fetchedTTL != null) { if (fetchedTTL != null) {
if (fetchedTTL instanceof Number) { if (fetchedTTL instanceof Number) {
ttl = ((Number) fetchedTTL).longValue(); return new TimeValue(((Number) fetchedTTL).longValue());
} else {
ttl = TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl").millis();
} }
return TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl");
} }
return ttl; return null;
} }
/** /**
@ -337,13 +335,10 @@ public class UpdateHelper extends AbstractComponent {
} }
} }
public static enum Operation { public enum Operation {
UPSERT, UPSERT,
INDEX, INDEX,
DELETE, DELETE,
NONE NONE
} }
} }
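The hunks above move UpdateHelper's TTL handling from a raw millisecond Long to TimeValue, so a script can hand back _ttl either as a number of milliseconds or as a time expression. A minimal standalone sketch of that conversion, mirroring getTTLFromScriptContext above (the map contents are illustrative):

----------------------------
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.common.unit.TimeValue;

final class TtlConversionSketch {
    // Mirrors getTTLFromScriptContext: numbers are treated as milliseconds,
    // strings are parsed as time expressions such as "2h".
    static TimeValue ttlFromScriptContext(Map<String, Object> ctx) {
        Object fetchedTTL = ctx.get("_ttl");
        if (fetchedTTL != null) {
            if (fetchedTTL instanceof Number) {
                return new TimeValue(((Number) fetchedTTL).longValue());
            }
            return TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl");
        }
        return null;
    }

    public static void main(String[] args) {
        Map<String, Object> ctx = new HashMap<>();
        ctx.put("_ttl", "2h"); // illustrative script-provided value
        System.out.println(ttlFromScriptContext(ctx)); // prints the parsed TTL
    }
}
----------------------------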

View File

@ -24,6 +24,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder; import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType; import org.elasticsearch.index.VersionType;
@ -325,7 +326,7 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
} }
/** /**
* Set the new ttl of the document. Note that if detectNoop is true (the default) * Set the new ttl of the document as a long. Note that if detectNoop is true (the default)
* and the source of the document isn't changed then the ttl update won't take * and the source of the document isn't changed then the ttl update won't take
* effect. * effect.
*/ */
@ -333,4 +334,24 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
request.doc().ttl(ttl); request.doc().ttl(ttl);
return this; return this;
} }
/**
* Set the new ttl of the document as a time value expression. Note that if detectNoop is true (the default)
* and the source of the document isn't changed then the ttl update won't take
* effect.
*/
public UpdateRequestBuilder setTtl(String ttl) {
request.doc().ttl(ttl);
return this;
}
/**
* Set the new ttl of the document as a {@link TimeValue} instance. Note that if detectNoop is true (the default)
* and the source of the document isn't changed then the ttl update won't take
* effect.
*/
public UpdateRequestBuilder setTtl(TimeValue ttl) {
request.doc().ttl(ttl);
return this;
}
} }
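A hedged usage sketch of the new overloads: the calls below are equivalent ways of setting a one hour TTL on an update (the client and the index/type/id coordinates are placeholders). As documented above, the TTL change is still subject to detectNoop.

----------------------------
import org.elasticsearch.action.update.UpdateRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;

final class SetTtlSketch {
    static UpdateRequestBuilder oneHourTtl(Client client) {
        return client.prepareUpdate("index", "type", "1")   // illustrative coordinates
                .setDoc("{\"field\":\"value\"}")             // updated source
                .setTtl(TimeValue.timeValueHours(1));        // or .setTtl("1h"), or .setTtl(3600000L)
    }
}
----------------------------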

View File

@ -217,4 +217,88 @@ final class JNAKernel32Library {
* @return true if the function succeeds. * @return true if the function succeeds.
*/ */
native boolean CloseHandle(Pointer handle); native boolean CloseHandle(Pointer handle);
/**
* Creates or opens a new job object
*
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx
*
* @param jobAttributes security attributes
* @param name job name
* @return job handle if the function succeeds
*/
native Pointer CreateJobObjectW(Pointer jobAttributes, String name);
/**
* Associates a process with an existing job
*
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx
*
* @param job job handle
* @param process process handle
* @return true if the function succeeds
*/
native boolean AssignProcessToJobObject(Pointer job, Pointer process);
/**
* Basic limit information for a job object
*
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx
*/
public static class JOBOBJECT_BASIC_LIMIT_INFORMATION extends Structure implements Structure.ByReference {
public long PerProcessUserTimeLimit;
public long PerJobUserTimeLimit;
public int LimitFlags;
public SizeT MinimumWorkingSetSize;
public SizeT MaximumWorkingSetSize;
public int ActiveProcessLimit;
public Pointer Affinity;
public int PriorityClass;
public int SchedulingClass;
@Override
protected List<String> getFieldOrder() {
return Arrays.asList(new String[] {
"PerProcessUserTimeLimit", "PerJobUserTimeLimit", "LimitFlags", "MinimumWorkingSetSize",
"MaximumWorkingSetSize", "ActiveProcessLimit", "Affinity", "PriorityClass", "SchedulingClass"
});
}
}
/**
* Constant for JOBOBJECT_BASIC_LIMIT_INFORMATION in Query/Set InformationJobObject
*/
static final int JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS = 2;
/**
* Constant for LimitFlags, indicating a process limit has been set
*/
static final int JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8;
/**
* Get job limit and state information
*
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx
*
* @param job job handle
* @param infoClass information class constant
* @param info pointer to information structure
* @param infoLength size of information structure
* @param returnLength length of data written back to structure (or null if not wanted)
* @return true if the function succeeds
*/
native boolean QueryInformationJobObject(Pointer job, int infoClass, Pointer info, int infoLength, Pointer returnLength);
/**
* Set job limit and state information
*
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx
*
* @param job job handle
* @param infoClass information class constant
* @param info pointer to information structure
* @param infoLength size of information structure
* @return true if the function succeeds
*/
native boolean SetInformationJobObject(Pointer job, int infoClass, Pointer info, int infoLength);
} }

View File

@ -47,7 +47,7 @@ import java.util.Map;
* Installs a limited form of secure computing mode, * Installs a limited form of secure computing mode,
* to filters system calls to block process execution. * to filters system calls to block process execution.
* <p> * <p>
* This is only supported on the Linux, Solaris, FreeBSD, OpenBSD, and Mac OS X operating systems. * This is supported on Linux, Solaris, FreeBSD, OpenBSD, Mac OS X, and Windows.
* <p> * <p>
* On Linux it currently supports amd64 and i386 architectures, requires Linux kernel 3.5 or above, and requires * On Linux it currently supports amd64 and i386 architectures, requires Linux kernel 3.5 or above, and requires
* {@code CONFIG_SECCOMP} and {@code CONFIG_SECCOMP_FILTER} compiled into the kernel. * {@code CONFIG_SECCOMP} and {@code CONFIG_SECCOMP_FILTER} compiled into the kernel.
@ -80,6 +80,8 @@ import java.util.Map;
* <li>{@code process-exec}</li> * <li>{@code process-exec}</li>
* </ul> * </ul>
* <p> * <p>
* On Windows, process creation is restricted with {@code SetInformationJobObject/ActiveProcessLimit}.
* <p>
* This is not intended as a sandbox. It is another level of security, mostly intended to annoy * This is not intended as a sandbox. It is another level of security, mostly intended to annoy
* security researchers and make their lives more difficult in achieving "remote execution" exploits. * security researchers and make their lives more difficult in achieving "remote execution" exploits.
* @see <a href="http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt"> * @see <a href="http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt">
@ -329,7 +331,8 @@ final class Seccomp {
case 1: break; // already set by caller case 1: break; // already set by caller
default: default:
int errno = Native.getLastError(); int errno = Native.getLastError();
if (errno == ENOSYS) { if (errno == EINVAL) {
// friendly error, this will be the typical case for an old kernel
throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in"); throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
} else { } else {
throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno)); throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno));
@ -561,6 +564,48 @@ final class Seccomp {
logger.debug("BSD RLIMIT_NPROC initialization successful"); logger.debug("BSD RLIMIT_NPROC initialization successful");
} }
// windows impl via job ActiveProcessLimit
static void windowsImpl() {
if (!Constants.WINDOWS) {
throw new IllegalStateException("bug: should not be trying to initialize ActiveProcessLimit for an unsupported OS");
}
JNAKernel32Library lib = JNAKernel32Library.getInstance();
// create a new Job
Pointer job = lib.CreateJobObjectW(null, null);
if (job == null) {
throw new UnsupportedOperationException("CreateJobObject: " + Native.getLastError());
}
try {
// retrieve the current basic limits of the job
int clazz = JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS;
JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION limits = new JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION();
limits.write();
if (!lib.QueryInformationJobObject(job, clazz, limits.getPointer(), limits.size(), null)) {
throw new UnsupportedOperationException("QueryInformationJobObject: " + Native.getLastError());
}
limits.read();
// modify the number of active processes to be 1 (exactly the one process we will add to the job).
limits.ActiveProcessLimit = 1;
limits.LimitFlags = JNAKernel32Library.JOB_OBJECT_LIMIT_ACTIVE_PROCESS;
limits.write();
if (!lib.SetInformationJobObject(job, clazz, limits.getPointer(), limits.size())) {
throw new UnsupportedOperationException("SetInformationJobObject: " + Native.getLastError());
}
// assign ourselves to the job
if (!lib.AssignProcessToJobObject(job, lib.GetCurrentProcess())) {
throw new UnsupportedOperationException("AssignProcessToJobObject: " + Native.getLastError());
}
} finally {
lib.CloseHandle(job);
}
logger.debug("Windows ActiveProcessLimit initialization successful");
}
/** /**
* Attempt to drop the capability to execute for the process. * Attempt to drop the capability to execute for the process.
* <p> * <p>
@ -581,6 +626,9 @@ final class Seccomp {
} else if (Constants.FREE_BSD || OPENBSD) { } else if (Constants.FREE_BSD || OPENBSD) {
bsdImpl(); bsdImpl();
return 1; return 1;
} else if (Constants.WINDOWS) {
windowsImpl();
return 1;
} else { } else {
throw new UnsupportedOperationException("syscall filtering not supported for OS: '" + Constants.OS_NAME + "'"); throw new UnsupportedOperationException("syscall filtering not supported for OS: '" + Constants.OS_NAME + "'");
} }

View File

@ -131,34 +131,48 @@ final class Security {
@SuppressForbidden(reason = "proper use of URL") @SuppressForbidden(reason = "proper use of URL")
static Map<String,Policy> getPluginPermissions(Environment environment) throws IOException, NoSuchAlgorithmException { static Map<String,Policy> getPluginPermissions(Environment environment) throws IOException, NoSuchAlgorithmException {
Map<String,Policy> map = new HashMap<>(); Map<String,Policy> map = new HashMap<>();
// collect up lists of plugins and modules
List<Path> pluginsAndModules = new ArrayList<>();
if (Files.exists(environment.pluginsFile())) { if (Files.exists(environment.pluginsFile())) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.pluginsFile())) { try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.pluginsFile())) {
for (Path plugin : stream) { for (Path plugin : stream) {
Path policyFile = plugin.resolve(PluginInfo.ES_PLUGIN_POLICY); pluginsAndModules.add(plugin);
if (Files.exists(policyFile)) { }
// first get a list of URLs for the plugins' jars: }
// we resolve symlinks so map is keyed on the normalize codebase name }
List<URL> codebases = new ArrayList<>(); if (Files.exists(environment.modulesFile())) {
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) { try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.modulesFile())) {
for (Path jar : jarStream) { for (Path plugin : stream) {
codebases.add(jar.toRealPath().toUri().toURL()); pluginsAndModules.add(plugin);
} }
} }
}
// parse the plugin's policy file into a set of permissions // now process each one
Policy policy = readPolicy(policyFile.toUri().toURL(), codebases.toArray(new URL[codebases.size()])); for (Path plugin : pluginsAndModules) {
Path policyFile = plugin.resolve(PluginInfo.ES_PLUGIN_POLICY);
// consult this policy for each of the plugin's jars: if (Files.exists(policyFile)) {
for (URL url : codebases) { // first get a list of URLs for the plugins' jars:
if (map.put(url.getFile(), policy) != null) { // we resolve symlinks so map is keyed on the normalize codebase name
// just be paranoid ok? List<URL> codebases = new ArrayList<>();
throw new IllegalStateException("per-plugin permissions already granted for jar file: " + url); try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
} for (Path jar : jarStream) {
} codebases.add(jar.toRealPath().toUri().toURL());
}
}
// parse the plugin's policy file into a set of permissions
Policy policy = readPolicy(policyFile.toUri().toURL(), codebases.toArray(new URL[codebases.size()]));
// consult this policy for each of the plugin's jars:
for (URL url : codebases) {
if (map.put(url.getFile(), policy) != null) {
// just be paranoid ok?
throw new IllegalStateException("per-plugin permissions already granted for jar file: " + url);
} }
} }
} }
} }
return Collections.unmodifiableMap(map); return Collections.unmodifiableMap(map);
} }
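For context, a minimal sketch of what parsing one of those per-plugin policy files can look like with the JDK's JavaPolicy provider. The real readPolicy also wires in the jar codebases collected above; that part is omitted here, so treat this as an assumption-level illustration.

----------------------------
import java.net.URI;
import java.security.NoSuchAlgorithmException;
import java.security.Policy;
import java.security.URIParameter;

final class PolicyReadSketch {
    // Parses a plugin policy file into a Policy object; codebase substitution is omitted.
    static Policy read(URI policyFile) throws NoSuchAlgorithmException {
        return Policy.getInstance("JavaPolicy", new URIParameter(policyFile));
    }
}
----------------------------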
@ -228,6 +242,7 @@ final class Security {
// read-only dirs // read-only dirs
addPath(policy, "path.home", environment.binFile(), "read,readlink"); addPath(policy, "path.home", environment.binFile(), "read,readlink");
addPath(policy, "path.home", environment.libFile(), "read,readlink"); addPath(policy, "path.home", environment.libFile(), "read,readlink");
addPath(policy, "path.home", environment.modulesFile(), "read,readlink");
addPath(policy, "path.plugins", environment.pluginsFile(), "read,readlink"); addPath(policy, "path.plugins", environment.pluginsFile(), "read,readlink");
addPath(policy, "path.conf", environment.configFile(), "read,readlink"); addPath(policy, "path.conf", environment.configFile(), "read,readlink");
addPath(policy, "path.scripts", environment.scriptsFile(), "read,readlink"); addPath(policy, "path.scripts", environment.scriptsFile(), "read,readlink");

View File

@ -125,7 +125,7 @@ public class TransportClient extends AbstractClient {
.put(CLIENT_TYPE_SETTING, CLIENT_TYPE) .put(CLIENT_TYPE_SETTING, CLIENT_TYPE)
.build(); .build();
PluginsService pluginsService = new PluginsService(settings, null, pluginClasses); PluginsService pluginsService = new PluginsService(settings, null, null, pluginClasses);
this.settings = pluginsService.updatedSettings(); this.settings = pluginsService.updatedSettings();
Version version = Version.CURRENT; Version version = Version.CURRENT;

View File

@ -0,0 +1,54 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
public interface AckedClusterStateTaskListener extends ClusterStateTaskListener {
/**
* Called to determine which nodes the acknowledgement is expected from
*
* @param discoveryNode a node
* @return true if the node is expected to send ack back, false otherwise
*/
boolean mustAck(DiscoveryNode discoveryNode);
/**
* Called once all the nodes have acknowledged the cluster state update request. Must be
* very lightweight execution, since it gets executed on the cluster service thread.
*
* @param t optional error that might have been thrown
*/
void onAllNodesAcked(@Nullable Throwable t);
/**
* Called once the acknowledgement timeout defined by
* {@link AckedClusterStateUpdateTask#ackTimeout()} has expired
*/
void onAckTimeout();
/**
* Acknowledgement timeout, maximum time interval to wait for acknowledgements
*/
TimeValue ackTimeout();
}
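A hedged sketch of an implementation that expects an acknowledgement from every node and gives up after 30 seconds (both choices are illustrative; onFailure comes from the parent ClusterStateTaskListener interface):

----------------------------
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;

final class AckFromAllNodesListener implements AckedClusterStateTaskListener {
    @Override
    public boolean mustAck(DiscoveryNode discoveryNode) {
        return true; // expect an ack from every node
    }

    @Override
    public void onAllNodesAcked(@Nullable Throwable t) {
        // every expected node acked; t carries an error from one of them, if any
    }

    @Override
    public void onAckTimeout() {
        // not every node acked within ackTimeout()
    }

    @Override
    public TimeValue ackTimeout() {
        return TimeValue.timeValueSeconds(30);
    }

    @Override
    public void onFailure(String source, Throwable t) {
        // the cluster state update itself failed
    }
}
----------------------------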

View File

@ -22,18 +22,24 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ack.AckedRequest; import org.elasticsearch.cluster.ack.AckedRequest;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
/** /**
* An extension interface to {@link ClusterStateUpdateTask} that allows to be notified when * An extension interface to {@link ClusterStateUpdateTask} that allows to be notified when
* all the nodes have acknowledged a cluster state update request * all the nodes have acknowledged a cluster state update request
*/ */
public abstract class AckedClusterStateUpdateTask<Response> extends ClusterStateUpdateTask { public abstract class AckedClusterStateUpdateTask<Response> extends ClusterStateUpdateTask implements AckedClusterStateTaskListener {
private final ActionListener<Response> listener; private final ActionListener<Response> listener;
private final AckedRequest request; private final AckedRequest request;
protected AckedClusterStateUpdateTask(AckedRequest request, ActionListener<Response> listener) { protected AckedClusterStateUpdateTask(AckedRequest request, ActionListener<Response> listener) {
this(Priority.NORMAL, request, listener);
}
protected AckedClusterStateUpdateTask(Priority priority, AckedRequest request, ActionListener<Response> listener) {
super(priority);
this.listener = listener; this.listener = listener;
this.request = request; this.request = request;
} }

View File

@ -176,7 +176,6 @@ public class ClusterModule extends AbstractModule {
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, Validator.TIME_NON_NEGATIVE); registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, Validator.TIME_NON_NEGATIVE);
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE); registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE); registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_SIZE_PER_SEC, Validator.BYTES_SIZE);
registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR); registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR);
registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER); registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER);
registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER); registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER);

View File

@ -24,7 +24,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.cluster.service.PendingClusterTask;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
@ -101,12 +100,35 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener); void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener);
/** /**
* Submits a task that will update the cluster state. * Submits a cluster state update task; submitted updates will be
* batched across the same instance of executor. The exact batching
* semantics depend on the underlying implementation but a rough
* guideline is that if the update task is submitted while there
* are pending update tasks for the same executor, these update
* tasks will all be executed on the executor in a single batch
*
* @param source the source of the cluster state update task
* @param task the state needed for the cluster state update task
* @param config the cluster state update task configuration
* @param executor the cluster state update task executor; tasks
* that share the same executor will be executed
* in batches on this executor
* @param listener callback after the cluster state update task
* completes
* @param <T> the type of the cluster state update task state
*/ */
void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask); <T> void submitStateUpdateTask(final String source, final T task,
final ClusterStateTaskConfig config,
final ClusterStateTaskExecutor<T> executor,
final ClusterStateTaskListener listener);
/** /**
* Submits a task that will update the cluster state (the task has a default priority of {@link Priority#NORMAL}). * Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener)},
* submitted updates will not be batched.
*
* @param source the source of the cluster state update task
* @param updateTask the full context for the cluster state update
* task
*/ */
void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask); void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask);
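A hedged usage sketch of the batched variant declared above; the source string and priority are placeholders, and the executor and listener are assumed to be supplied by the caller:

----------------------------
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.common.Priority;

final class SubmitBatchedTaskSketch {
    static <T> void submit(ClusterService clusterService, T task,
                           ClusterStateTaskExecutor<T> executor,
                           ClusterStateTaskListener listener) {
        // Tasks submitted against the same executor instance may be processed
        // together in a single batch, as described in the javadoc above.
        clusterService.submitStateUpdateTask("example-source", task,
                ClusterStateTaskConfig.build(Priority.NORMAL), executor, listener);
    }
}
----------------------------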

View File

@ -19,9 +19,9 @@
package org.elasticsearch.cluster; package org.elasticsearch.cluster;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -469,6 +469,16 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
} }
builder.endArray(); builder.endArray();
builder.startObject(IndexMetaData.KEY_ACTIVE_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.getActiveAllocationIds()) {
builder.startArray(String.valueOf(cursor.key));
for (String allocationId : cursor.value) {
builder.value(allocationId);
}
builder.endArray();
}
builder.endObject();
builder.endObject(); builder.endObject();
} }
builder.endObject(); builder.endObject();
@ -584,6 +594,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
public Builder routingResult(RoutingAllocation.Result routingResult) { public Builder routingResult(RoutingAllocation.Result routingResult) {
this.routingTable = routingResult.routingTable(); this.routingTable = routingResult.routingTable();
this.metaData = routingResult.metaData();
return this; return this;
} }
@ -759,7 +770,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
nodes = after.nodes.diff(before.nodes); nodes = after.nodes.diff(before.nodes);
metaData = after.metaData.diff(before.metaData); metaData = after.metaData.diff(before.metaData);
blocks = after.blocks.diff(before.blocks); blocks = after.blocks.diff(before.blocks);
customs = DiffableUtils.diff(before.customs, after.customs); customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
} }
public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException { public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException {
@ -771,14 +782,15 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
nodes = proto.nodes.readDiffFrom(in); nodes = proto.nodes.readDiffFrom(in);
metaData = proto.metaData.readDiffFrom(in); metaData = proto.metaData.readDiffFrom(in);
blocks = proto.blocks.readDiffFrom(in); blocks = proto.blocks.readDiffFrom(in);
customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() { customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override @Override
public Custom readFrom(StreamInput in, String key) throws IOException { public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in); return lookupPrototypeSafe(key).readFrom(in);
} }
@Override @Override
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException { public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in); return lookupPrototypeSafe(key).readDiffFrom(in);
} }
}); });

View File

@ -0,0 +1,92 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue;
/**
* Cluster state update task configuration for timeout and priority
*/
public interface ClusterStateTaskConfig {
/**
* The timeout for this cluster state update task configuration. If
* the cluster state update task isn't processed within this
* timeout, the associated {@link ClusterStateTaskListener#onFailure(String, Throwable)}
* is invoked.
*
* @return the timeout, or null if one is not set
*/
@Nullable
TimeValue timeout();
/**
* The {@link Priority} for this cluster state update task configuration.
*
* @return the priority
*/
Priority priority();
/**
* Build a cluster state update task configuration with the
* specified {@link Priority} and no timeout.
*
* @param priority the priority for the associated cluster state
* update task
* @return the resulting cluster state update task configuration
*/
static ClusterStateTaskConfig build(Priority priority) {
return new Basic(priority, null);
}
/**
* Build a cluster state update task configuration with the
* specified {@link Priority} and timeout.
*
* @param priority the priority for the associated cluster state
* update task
* @param timeout the timeout for the associated cluster state
* update task
* @return the resulting cluster state update task configuration
*/
static ClusterStateTaskConfig build(Priority priority, TimeValue timeout) {
return new Basic(priority, timeout);
}
class Basic implements ClusterStateTaskConfig {
final TimeValue timeout;
final Priority priority;
public Basic(Priority priority, TimeValue timeout) {
this.timeout = timeout;
this.priority = priority;
}
@Override
public TimeValue timeout() {
return timeout;
}
@Override
public Priority priority() {
return priority;
}
}
}
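A short usage sketch of the two factory methods; the priority values and the 30 second timeout are arbitrary:

----------------------------
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue;

final class TaskConfigSketch {
    // No timeout: the task may wait in the pending queue indefinitely.
    static final ClusterStateTaskConfig NO_TIMEOUT = ClusterStateTaskConfig.build(Priority.NORMAL);

    // With timeout: the listener's onFailure fires if the task is not processed within 30s.
    static final ClusterStateTaskConfig WITH_TIMEOUT =
            ClusterStateTaskConfig.build(Priority.URGENT, TimeValue.timeValueSeconds(30));
}
----------------------------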

View File

@ -0,0 +1,132 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
public interface ClusterStateTaskExecutor<T> {
/**
* Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state
* should be changed.
*/
BatchResult<T> execute(ClusterState currentState, List<T> tasks) throws Exception;
/**
* indicates whether this task should only run if current node is master
*/
default boolean runOnlyOnMaster() {
return true;
}
/**
* Represents the result of a batched execution of cluster state update tasks
* @param <T> the type of the cluster state update task
*/
class BatchResult<T> {
final public ClusterState resultingState;
final public Map<T, TaskResult> executionResults;
/**
* Construct an execution result instance with a correspondence between the tasks and their execution result
* @param resultingState the resulting cluster state
* @param executionResults the correspondence between tasks and their outcome
*/
BatchResult(ClusterState resultingState, Map<T, TaskResult> executionResults) {
this.resultingState = resultingState;
this.executionResults = executionResults;
}
public static <T> Builder<T> builder() {
return new Builder<>();
}
public static class Builder<T> {
private final Map<T, TaskResult> executionResults = new IdentityHashMap<>();
public Builder<T> success(T task) {
return result(task, TaskResult.success());
}
public Builder<T> successes(Iterable<T> tasks) {
for (T task : tasks) {
success(task);
}
return this;
}
public Builder<T> failure(T task, Throwable t) {
return result(task, TaskResult.failure(t));
}
public Builder<T> failures(Iterable<T> tasks, Throwable t) {
for (T task : tasks) {
failure(task, t);
}
return this;
}
private Builder<T> result(T task, TaskResult executionResult) {
executionResults.put(task, executionResult);
return this;
}
public BatchResult<T> build(ClusterState resultingState) {
return new BatchResult<>(resultingState, executionResults);
}
}
}
final class TaskResult {
private final Throwable failure;
private static final TaskResult SUCCESS = new TaskResult(null);
public static TaskResult success() {
return SUCCESS;
}
public static TaskResult failure(Throwable failure) {
return new TaskResult(failure);
}
private TaskResult(Throwable failure) {
this.failure = failure;
}
public boolean isSuccess() {
return failure == null; // success means no failure was recorded
}
/**
* Handle the execution result with the provided consumers
* @param onSuccess handler to invoke on success
* @param onFailure handler to invoke on failure; the throwable passed through will not be null
*/
public void handle(Runnable onSuccess, Consumer<Throwable> onFailure) {
if (failure == null) {
onSuccess.run();
} else {
onFailure.accept(failure);
}
}
}
}
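A hedged sketch of a trivial executor: it leaves the cluster state untouched and marks every task in the batch as successful, mirroring what the adapted ClusterStateUpdateTask does for the non-batched case further below.

----------------------------
import java.util.List;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;

final class NoopBatchedExecutor<T> implements ClusterStateTaskExecutor<T> {
    @Override
    public BatchResult<T> execute(ClusterState currentState, List<T> tasks) {
        // Returning the same ClusterState instance signals that nothing changed.
        return BatchResult.<T>builder().successes(tasks).build(currentState);
    }
}
----------------------------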

View File

@ -16,22 +16,28 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
package org.elasticsearch.index.fieldvisitor; package org.elasticsearch.cluster;
import org.apache.lucene.index.FieldInfo; import java.util.List;
import java.io.IOException; public interface ClusterStateTaskListener {
/** /**
*/ * A callback called when execute fails.
public class AllFieldsVisitor extends FieldsVisitor { */
void onFailure(String source, Throwable t);
public AllFieldsVisitor() { /**
super(true); * called when the task was rejected because the local node is no longer master
*/
default void onNoLongerMaster(String source) {
onFailure(source, new NotMasterException("no longer master. source: [" + source + "]"));
} }
@Override /**
public Status needsField(FieldInfo fieldInfo) throws IOException { * Called when the result of the {@link ClusterStateTaskExecutor#execute(ClusterState, List)} have been processed
return Status.YES; * properly by all listeners.
*/
default void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
} }
} }

View File

@ -20,13 +20,31 @@
package org.elasticsearch.cluster; package org.elasticsearch.cluster;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import java.util.List;
/** /**
* A task that can update the cluster state. * A task that can update the cluster state.
*/ */
abstract public class ClusterStateUpdateTask { abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<ClusterStateUpdateTask>, ClusterStateTaskListener {
final private Priority priority;
public ClusterStateUpdateTask() {
this(Priority.NORMAL);
}
public ClusterStateUpdateTask(Priority priority) {
this.priority = priority;
}
@Override
final public BatchResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
ClusterState result = execute(currentState);
return BatchResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
}
/** /**
* Update the cluster state based on the current state. Return the *same instance* if no state * Update the cluster state based on the current state. Return the *same instance* if no state
@ -39,28 +57,6 @@ abstract public class ClusterStateUpdateTask {
*/ */
abstract public void onFailure(String source, Throwable t); abstract public void onFailure(String source, Throwable t);
/**
* indicates whether this task should only run if current node is master
*/
public boolean runOnlyOnMaster() {
return true;
}
/**
* called when the task was rejected because the local node is no longer master
*/
public void onNoLongerMaster(String source) {
onFailure(source, new NotMasterException("no longer master. source: [" + source + "]"));
}
/**
* Called when the result of the {@link #execute(ClusterState)} have been processed
* properly by all listeners.
*/
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
/** /**
* If the cluster state update task wasn't processed by the provided timeout, call * If the cluster state update task wasn't processed by the provided timeout, call
* {@link #onFailure(String, Throwable)}. May return null to indicate no timeout is needed (default). * {@link #onFailure(String, Throwable)}. May return null to indicate no timeout is needed (default).
@ -70,5 +66,8 @@ abstract public class ClusterStateUpdateTask {
return null; return null;
} }
@Override
public Priority priority() {
return priority;
}
} }
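A hedged sketch of a concrete non-batched task on top of the adapted class; it changes nothing and only shows the required overrides (the priority is arbitrary):

----------------------------
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.common.Priority;

final class NoopUpdateTask extends ClusterStateUpdateTask {
    NoopUpdateTask() {
        super(Priority.HIGH); // illustrative priority
    }

    @Override
    public ClusterState execute(ClusterState currentState) {
        return currentState; // same instance => no new cluster state is published
    }

    @Override
    public void onFailure(String source, Throwable t) {
        // handle the failure of this task
    }
}
----------------------------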

View File

@ -29,7 +29,7 @@ import java.io.IOException;
public interface Diff<T> { public interface Diff<T> {
/** /**
* Applies difference to the specified part and retunrs the resulted part * Applies difference to the specified part and returns the resulted part
*/ */
T apply(T part); T apply(T part);

View File

@ -19,263 +19,630 @@
package org.elasticsearch.cluster; package org.elasticsearch.cluster;
import com.carrotsearch.hppc.cursors.IntCursor;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set;
public final class DiffableUtils { public final class DiffableUtils {
private DiffableUtils() { private DiffableUtils() {
} }
/**
* Returns a map key serializer for String keys
*/
public static KeySerializer<String> getStringKeySerializer() {
return StringKeySerializer.INSTANCE;
}
/**
* Returns a map key serializer for Integer keys. Encodes as Int.
*/
public static KeySerializer<Integer> getIntKeySerializer() {
return IntKeySerializer.INSTANCE;
}
/**
* Returns a map key serializer for Integer keys. Encodes as VInt.
*/
public static KeySerializer<Integer> getVIntKeySerializer() {
return VIntKeySerializer.INSTANCE;
}
/** /**
* Calculates diff between two ImmutableOpenMaps of Diffable objects * Calculates diff between two ImmutableOpenMaps of Diffable objects
*/ */
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> diff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) { public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer) {
assert after != null && before != null; assert after != null && before != null;
return new ImmutableOpenMapDiff<>(before, after); return new ImmutableOpenMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
}
/**
* Calculates diff between two ImmutableOpenMaps of non-diffable objects
*/
public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
assert after != null && before != null;
return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer);
}
/**
* Calculates diff between two ImmutableOpenIntMaps of Diffable objects
*/
public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer) {
assert after != null && before != null;
return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
}
/**
* Calculates diff between two ImmutableOpenIntMaps of non-diffable objects
*/
public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer, NonDiffableValueSerializer<Integer, T> valueSerializer) {
assert after != null && before != null;
return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer);
} }
/** /**
* Calculates diff between two Maps of Diffable objects. * Calculates diff between two Maps of Diffable objects.
*/ */
public static <T extends Diffable<T>> Diff<Map<String, T>> diff(Map<String, T> before, Map<String, T> after) { public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer) {
assert after != null && before != null; assert after != null && before != null;
return new JdkMapDiff<>(before, after); return new JdkMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
}
/**
* Calculates diff between two Maps of non-diffable objects
*/
public static <K, T> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
assert after != null && before != null;
return new JdkMapDiff<>(before, after, keySerializer, valueSerializer);
} }
/** /**
* Loads an object that represents difference between two ImmutableOpenMaps * Loads an object that represents difference between two ImmutableOpenMaps
*/ */
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException { public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
return new ImmutableOpenMapDiff<>(in, keyedReader); return new ImmutableOpenMapDiff<>(in, keySerializer, valueSerializer);
}
/**
* Loads an object that represents difference between two Maps.
*/
public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
return new JdkMapDiff<>(in, keyedReader);
} }
/** /**
* Loads an object that represents difference between two ImmutableOpenMaps * Loads an object that represents difference between two ImmutableOpenMaps
*/ */
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, T proto) throws IOException { public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) throws IOException {
return new ImmutableOpenMapDiff<>(in, new PrototypeReader<>(proto)); return new ImmutableOpenIntMapDiff<>(in, keySerializer, valueSerializer);
} }
/** /**
* Loads an object that represents difference between two Maps. * Loads an object that represents difference between two Maps of Diffable objects
*/ */
public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, T proto) throws IOException { public static <K, T> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
return new JdkMapDiff<>(in, new PrototypeReader<>(proto)); return new JdkMapDiff<>(in, keySerializer, valueSerializer);
} }
/** /**
* A reader that can deserialize an object. The reader can select the deserialization type based on the key. It's * Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object
* used in custom metadata deserialization.
*/ */
public interface KeyedReader<T> { public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
/**
* reads an object of the type T from the stream input
*/
T readFrom(StreamInput in, String key) throws IOException;
/**
* reads an object that respresents differences between two objects with the type T from the stream input
*/
Diff<T> readDiffFrom(StreamInput in, String key) throws IOException;
} }
/** /**
* Implementation of the KeyedReader that is using a prototype object for reading operations * Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object
*
* Note: this implementation is ignoring the key.
*/ */
public static class PrototypeReader<T extends Diffable<T>> implements KeyedReader<T> { public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, T proto) throws IOException {
private T proto; return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
public PrototypeReader(T proto) {
this.proto = proto;
}
@Override
public T readFrom(StreamInput in, String key) throws IOException {
return proto.readFrom(in);
}
@Override
public Diff<T> readDiffFrom(StreamInput in, String key) throws IOException {
return proto.readDiffFrom(in);
}
} }
/** /**
* Represents differences between two Maps of Diffable objects. * Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object
*/
public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
return new JdkMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
}
/**
* Represents differences between two Maps of (possibly diffable) objects.
* *
* @param <T> the diffable object * @param <T> the diffable object
*/ */
private static class JdkMapDiff<T extends Diffable<T>> extends MapDiff<T, Map<String, T>> { private static class JdkMapDiff<K, T> extends MapDiff<K, T, Map<K, T>> {
protected JdkMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException { protected JdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
super(in, reader); super(in, keySerializer, valueSerializer);
} }
public JdkMapDiff(Map<String, T> before, Map<String, T> after) { public JdkMapDiff(Map<K, T> before, Map<K, T> after,
KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
super(keySerializer, valueSerializer);
assert after != null && before != null; assert after != null && before != null;
for (String key : before.keySet()) {
for (K key : before.keySet()) {
if (!after.containsKey(key)) { if (!after.containsKey(key)) {
deletes.add(key); deletes.add(key);
} }
} }
for (Map.Entry<String, T> partIter : after.entrySet()) {
for (Map.Entry<K, T> partIter : after.entrySet()) {
T beforePart = before.get(partIter.getKey()); T beforePart = before.get(partIter.getKey());
if (beforePart == null) { if (beforePart == null) {
adds.put(partIter.getKey(), partIter.getValue()); upserts.put(partIter.getKey(), partIter.getValue());
} else if (partIter.getValue().equals(beforePart) == false) { } else if (partIter.getValue().equals(beforePart) == false) {
diffs.put(partIter.getKey(), partIter.getValue().diff(beforePart)); if (valueSerializer.supportsDiffableValues()) {
diffs.put(partIter.getKey(), valueSerializer.diff(partIter.getValue(), beforePart));
} else {
upserts.put(partIter.getKey(), partIter.getValue());
}
} }
} }
} }
@Override @Override
public Map<String, T> apply(Map<String, T> map) { public Map<K, T> apply(Map<K, T> map) {
Map<String, T> builder = new HashMap<>(); Map<K, T> builder = new HashMap<>();
builder.putAll(map); builder.putAll(map);
for (String part : deletes) { for (K part : deletes) {
builder.remove(part); builder.remove(part);
} }
for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) { for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
} }
for (Map.Entry<String, T> additon : adds.entrySet()) { for (Map.Entry<K, T> upsert : upserts.entrySet()) {
builder.put(additon.getKey(), additon.getValue()); builder.put(upsert.getKey(), upsert.getValue());
} }
return builder; return builder;
} }
} }
/** /**
* Represents differences between two ImmutableOpenMap of diffable objects * Represents differences between two ImmutableOpenMap of (possibly diffable) objects
* *
* @param <T> the diffable object * @param <T> the object type
*/ */
private static class ImmutableOpenMapDiff<T extends Diffable<T>> extends MapDiff<T, ImmutableOpenMap<String, T>> { private static class ImmutableOpenMapDiff<K, T> extends MapDiff<K, T, ImmutableOpenMap<K, T>> {
protected ImmutableOpenMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException { protected ImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
super(in, reader); super(in, keySerializer, valueSerializer);
} }
public ImmutableOpenMapDiff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) { public ImmutableOpenMapDiff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after,
KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
super(keySerializer, valueSerializer);
assert after != null && before != null; assert after != null && before != null;
for (ObjectCursor<String> key : before.keys()) {
for (ObjectCursor<K> key : before.keys()) {
if (!after.containsKey(key.value)) { if (!after.containsKey(key.value)) {
deletes.add(key.value); deletes.add(key.value);
} }
} }
for (ObjectObjectCursor<String, T> partIter : after) {
for (ObjectObjectCursor<K, T> partIter : after) {
T beforePart = before.get(partIter.key); T beforePart = before.get(partIter.key);
if (beforePart == null) { if (beforePart == null) {
adds.put(partIter.key, partIter.value); upserts.put(partIter.key, partIter.value);
} else if (partIter.value.equals(beforePart) == false) { } else if (partIter.value.equals(beforePart) == false) {
diffs.put(partIter.key, partIter.value.diff(beforePart)); if (valueSerializer.supportsDiffableValues()) {
diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
} else {
upserts.put(partIter.key, partIter.value);
}
} }
} }
} }
@Override @Override
public ImmutableOpenMap<String, T> apply(ImmutableOpenMap<String, T> map) { public ImmutableOpenMap<K, T> apply(ImmutableOpenMap<K, T> map) {
ImmutableOpenMap.Builder<String, T> builder = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder<K, T> builder = ImmutableOpenMap.builder();
builder.putAll(map); builder.putAll(map);
for (String part : deletes) { for (K part : deletes) {
builder.remove(part); builder.remove(part);
} }
for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) { for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
} }
for (Map.Entry<String, T> additon : adds.entrySet()) { for (Map.Entry<K, T> upsert : upserts.entrySet()) {
builder.put(additon.getKey(), additon.getValue()); builder.put(upsert.getKey(), upsert.getValue());
} }
return builder.build(); return builder.build();
} }
} }
/** /**
* Represents differences between two maps of diffable objects * Represents differences between two ImmutableOpenIntMap of (possibly diffable) objects
* *
* This class is used as base class for different map implementations * @param <T> the object type
*
* @param <T> the diffable object
*/ */
private static abstract class MapDiff<T extends Diffable<T>, M> implements Diff<M> { private static class ImmutableOpenIntMapDiff<T> extends MapDiff<Integer, T, ImmutableOpenIntMap<T>> {
protected final List<String> deletes; protected ImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) throws IOException {
protected final Map<String, Diff<T>> diffs; super(in, keySerializer, valueSerializer);
protected final Map<String, T> adds;
protected MapDiff() {
deletes = new ArrayList<>();
diffs = new HashMap<>();
adds = new HashMap<>();
} }
protected MapDiff(StreamInput in, KeyedReader<T> reader) throws IOException { public ImmutableOpenIntMapDiff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after,
KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) {
super(keySerializer, valueSerializer);
assert after != null && before != null;
for (IntCursor key : before.keys()) {
if (!after.containsKey(key.value)) {
deletes.add(key.value);
}
}
for (IntObjectCursor<T> partIter : after) {
T beforePart = before.get(partIter.key);
if (beforePart == null) {
upserts.put(partIter.key, partIter.value);
} else if (partIter.value.equals(beforePart) == false) {
if (valueSerializer.supportsDiffableValues()) {
diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
} else {
upserts.put(partIter.key, partIter.value);
}
}
}
}
@Override
public ImmutableOpenIntMap<T> apply(ImmutableOpenIntMap<T> map) {
ImmutableOpenIntMap.Builder<T> builder = ImmutableOpenIntMap.builder();
builder.putAll(map);
for (Integer part : deletes) {
builder.remove(part);
}
for (Map.Entry<Integer, Diff<T>> diff : diffs.entrySet()) {
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
}
for (Map.Entry<Integer, T> upsert : upserts.entrySet()) {
builder.put(upsert.getKey(), upsert.getValue());
}
return builder.build();
}
}
/**
* Represents differences between two maps of objects and is used as base class for different map implementations.
*
* Implements serialization. How differences are applied is left to subclasses.
*
* @param <K> the type of map keys
* @param <T> the type of map values
* @param <M> the map implementation type
*/
public static abstract class MapDiff<K, T, M> implements Diff<M> {
protected final List<K> deletes;
protected final Map<K, Diff<T>> diffs; // incremental updates
protected final Map<K, T> upserts; // additions or full updates
protected final KeySerializer<K> keySerializer;
protected final ValueSerializer<K, T> valueSerializer;
protected MapDiff(KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
deletes = new ArrayList<>(); deletes = new ArrayList<>();
diffs = new HashMap<>(); diffs = new HashMap<>();
adds = new HashMap<>(); upserts = new HashMap<>();
}
protected MapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
deletes = new ArrayList<>();
diffs = new HashMap<>();
upserts = new HashMap<>();
int deletesCount = in.readVInt(); int deletesCount = in.readVInt();
for (int i = 0; i < deletesCount; i++) { for (int i = 0; i < deletesCount; i++) {
deletes.add(in.readString()); deletes.add(keySerializer.readKey(in));
} }
int diffsCount = in.readVInt(); int diffsCount = in.readVInt();
for (int i = 0; i < diffsCount; i++) { for (int i = 0; i < diffsCount; i++) {
String key = in.readString(); K key = keySerializer.readKey(in);
Diff<T> diff = reader.readDiffFrom(in, key); Diff<T> diff = valueSerializer.readDiff(in, key);
diffs.put(key, diff); diffs.put(key, diff);
} }
int upsertsCount = in.readVInt();
int addsCount = in.readVInt(); for (int i = 0; i < upsertsCount; i++) {
for (int i = 0; i < addsCount; i++) { K key = keySerializer.readKey(in);
String key = in.readString(); T newValue = valueSerializer.read(in, key);
T part = reader.readFrom(in, key); upserts.put(key, newValue);
adds.put(key, part);
} }
} }
/**
* The keys that, when this diff is applied to a map, should be removed from the map.
*
* @return the list of keys that are deleted
*/
public List<K> getDeletes() {
return deletes;
}
/**
* Map entries that, when this diff is applied to a map, should be
* incrementally updated. The incremental update is represented using
* the {@link Diff} interface.
*
* @return the map entries that are incrementally updated
*/
public Map<K, Diff<T>> getDiffs() {
return diffs;
}
/**
* Map entries that, when this diff is applied to a map, should be
* added to the map or fully replace the previous value.
*
* @return the map entries that are additions or full updates
*/
public Map<K, T> getUpserts() {
return upserts;
}
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(deletes.size()); out.writeVInt(deletes.size());
for (String delete : deletes) { for (K delete : deletes) {
out.writeString(delete); keySerializer.writeKey(delete, out);
} }
out.writeVInt(diffs.size()); out.writeVInt(diffs.size());
for (Map.Entry<String, Diff<T>> entry : diffs.entrySet()) { for (Map.Entry<K, Diff<T>> entry : diffs.entrySet()) {
out.writeString(entry.getKey()); keySerializer.writeKey(entry.getKey(), out);
entry.getValue().writeTo(out); valueSerializer.writeDiff(entry.getValue(), out);
} }
out.writeVInt(upserts.size());
out.writeVInt(adds.size()); for (Map.Entry<K, T> entry : upserts.entrySet()) {
for (Map.Entry<String, T> entry : adds.entrySet()) { keySerializer.writeKey(entry.getKey(), out);
out.writeString(entry.getKey()); valueSerializer.write(entry.getValue(), out);
entry.getValue().writeTo(out);
} }
} }
} }
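// Illustration (not part of this change): the three buckets a MapDiff carries
// (deletes, diffs and upserts) can be reproduced with plain java.util maps. The
// self-contained sketch below uses non-diffable String values, so every changed
// entry lands in the upserts bucket, exactly the fallback the
// supportsDiffableValues() branches above take.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MapDiffSketch {
    public static void main(String[] args) {
        Map<String, String> before = new HashMap<>();
        before.put("a", "1");
        before.put("b", "2");
        before.put("c", "3");

        Map<String, String> after = new HashMap<>();
        after.put("b", "2");   // unchanged -> recorded nowhere
        after.put("c", "30");  // changed   -> upsert (values are not diffable)
        after.put("d", "4");   // added     -> upsert

        List<String> deletes = new ArrayList<>();
        Map<String, String> upserts = new HashMap<>();
        for (String key : before.keySet()) {
            if (after.containsKey(key) == false) {
                deletes.add(key);            // "a" only exists in the before map
            }
        }
        for (Map.Entry<String, String> entry : after.entrySet()) {
            String beforeValue = before.get(entry.getKey());
            if (beforeValue == null || beforeValue.equals(entry.getValue()) == false) {
                upserts.put(entry.getKey(), entry.getValue());
            }
        }

        // Applying the recorded changes to the before map rebuilds the after map,
        // which is what MapDiff.apply() does for the Elasticsearch map types.
        Map<String, String> rebuilt = new HashMap<>(before);
        for (String key : deletes) {
            rebuilt.remove(key);
        }
        rebuilt.putAll(upserts);
        System.out.println(rebuilt.equals(after)); // prints: true
    }
}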
/**
* Provides read and write operations to serialize keys of map
* @param <K> type of key
*/
public interface KeySerializer<K> {
void writeKey(K key, StreamOutput out) throws IOException;
K readKey(StreamInput in) throws IOException;
}
/**
* Serializes String keys of a map
*/
private static final class StringKeySerializer implements KeySerializer<String> {
private static final StringKeySerializer INSTANCE = new StringKeySerializer();
@Override
public void writeKey(String key, StreamOutput out) throws IOException {
out.writeString(key);
}
@Override
public String readKey(StreamInput in) throws IOException {
return in.readString();
}
}
/**
* Serializes Integer keys of a map as an Int
*/
private static final class IntKeySerializer implements KeySerializer<Integer> {
public static final IntKeySerializer INSTANCE = new IntKeySerializer();
@Override
public void writeKey(Integer key, StreamOutput out) throws IOException {
out.writeInt(key);
}
@Override
public Integer readKey(StreamInput in) throws IOException {
return in.readInt();
}
}
/**
* Serializes Integer keys of a map as a VInt. Requires keys to be positive.
*/
private static final class VIntKeySerializer implements KeySerializer<Integer> {
public static final VIntKeySerializer INSTANCE = new VIntKeySerializer();
@Override
public void writeKey(Integer key, StreamOutput out) throws IOException {
if (key < 0) {
throw new IllegalArgumentException("Map key [" + key + "] must be positive");
}
out.writeVInt(key);
}
@Override
public Integer readKey(StreamInput in) throws IOException {
return in.readVInt();
}
}
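// Illustration: writeVInt/readVInt use a 7-bits-per-byte variable-length
// encoding (low-order bits first, the high bit flags a continuation byte), so
// small non-negative keys cost one or two bytes, while any negative int would
// always take five bytes; this is why the serializer above rejects negative
// keys. A standalone sketch of that encoding, assuming the usual Lucene-style
// VInt format:

import java.io.ByteArrayOutputStream;

public class VIntSketch {
    static void writeVInt(ByteArrayOutputStream out, int i) {
        while ((i & ~0x7F) != 0) {
            out.write((i & 0x7F) | 0x80); // low 7 bits, continuation bit set
            i >>>= 7;
        }
        out.write(i);                      // final byte, continuation bit clear
    }

    public static void main(String[] args) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writeVInt(out, 5);      // 1 byte
        writeVInt(out, 300);    // 2 bytes
        writeVInt(out, 100000); // 3 bytes
        System.out.println(out.size()); // prints: 6
    }
}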
/**
* Provides read and write operations to serialize map values.
* Reading of values can be made dependent on map key.
*
* Also provides operations to distinguish whether map values are diffable.
*
* Should not be directly implemented, instead implement either
* {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}.
*
* @param <K> key type of map
* @param <V> value type of map
*/
public interface ValueSerializer<K, V> {
/**
* Writes value to stream
*/
void write(V value, StreamOutput out) throws IOException;
/**
* Reads value from stream. Reading operation can be made dependent on map key.
*/
V read(StreamInput in, K key) throws IOException;
/**
* Whether this serializer supports diffable values
*/
boolean supportsDiffableValues();
/**
* Computes diff if this serializer supports diffable values
*/
Diff<V> diff(V value, V beforePart);
/**
* Writes value as diff to stream if this serializer supports diffable values
*/
void writeDiff(Diff<V> value, StreamOutput out) throws IOException;
/**
* Reads value as diff from stream if this serializer supports diffable values.
* Reading operation can be made dependent on map key.
*/
Diff<V> readDiff(StreamInput in, K key) throws IOException;
}
/**
* Serializer for Diffable map values. Needs to implement read and readDiff methods.
*
* @param <K> type of map keys
* @param <V> type of map values
*/
public static abstract class DiffableValueSerializer<K, V extends Diffable<V>> implements ValueSerializer<K, V> {
private static final DiffableValueSerializer WRITE_ONLY_INSTANCE = new DiffableValueSerializer() {
@Override
public Object read(StreamInput in, Object key) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Diff<Object> readDiff(StreamInput in, Object key) throws IOException {
throw new UnsupportedOperationException();
}
};
private static <K, V extends Diffable<V>> DiffableValueSerializer<K, V> getWriteOnlyInstance() {
return WRITE_ONLY_INSTANCE;
}
@Override
public boolean supportsDiffableValues() {
return true;
}
@Override
public Diff<V> diff(V value, V beforePart) {
return value.diff(beforePart);
}
@Override
public void write(V value, StreamOutput out) throws IOException {
value.writeTo(out);
}
public void writeDiff(Diff<V> value, StreamOutput out) throws IOException {
value.writeTo(out);
}
}
/**
* Serializer for non-diffable map values
*
* @param <K> type of map keys
* @param <V> type of map values
*/
public static abstract class NonDiffableValueSerializer<K, V> implements ValueSerializer<K, V> {
@Override
public boolean supportsDiffableValues() {
return false;
}
@Override
public Diff<V> diff(V value, V beforePart) {
throw new UnsupportedOperationException();
}
@Override
public void writeDiff(Diff<V> value, StreamOutput out) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Diff<V> readDiff(StreamInput in, K key) throws IOException {
throw new UnsupportedOperationException();
}
}
/**
* Implementation of the ValueSerializer that uses a prototype object for reading operations
*
* Note: this implementation ignores the key.
*/
public static class DiffablePrototypeValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
private final V proto;
public DiffablePrototypeValueReader(V proto) {
this.proto = proto;
}
@Override
public V read(StreamInput in, K key) throws IOException {
return proto.readFrom(in);
}
@Override
public Diff<V> readDiff(StreamInput in, K key) throws IOException {
return proto.readDiffFrom(in);
}
}
/**
* Implementation of ValueSerializer that serializes immutable sets
*
* @param <K> type of map key
*/
public static class StringSetValueSerializer<K> extends NonDiffableValueSerializer<K, Set<String>> {
private static final StringSetValueSerializer INSTANCE = new StringSetValueSerializer();
public static <K> StringSetValueSerializer<K> getInstance() {
return INSTANCE;
}
@Override
public void write(Set<String> value, StreamOutput out) throws IOException {
out.writeStringArray(value.toArray(new String[value.size()]));
}
@Override
public Set<String> read(StreamInput in, K key) throws IOException {
return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(in.readStringArray())));
}
}
} }
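// Putting the new API together: the call sites changed later in this commit
// pass a key serializer (and, for non-diffable values, a value serializer)
// where the old code passed a KeyedReader. A sketch of that calling pattern,
// mirroring the IndexMetaDataDiff hunk below; `before` and `after` stand for
// the two IndexMetaData instances being compared and are not defined here:

Diff<ImmutableOpenMap<String, MappingMetaData>> mappingsDiff =
        DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());

// Set<String> values are not Diffable, so the allocation-id map supplies an
// explicit NonDiffableValueSerializer plus a VInt key serializer for shard ids.
Diff<ImmutableOpenIntMap<Set<String>>> activeAllocationIdsDiff =
        DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds,
                DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());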
View File
@ -25,7 +25,6 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaDataMappingService; import org.elasticsearch.cluster.metadata.MetaDataMappingService;
import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
@ -57,7 +56,7 @@ public class NodeMappingRefreshAction extends AbstractComponent {
public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) { public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) {
final DiscoveryNodes nodes = state.nodes(); final DiscoveryNodes nodes = state.nodes();
if (nodes.masterNode() == null) { if (nodes.masterNode() == null) {
logger.warn("can't send mapping refresh for [{}][{}], no master known.", request.index(), Strings.arrayToCommaDelimitedString(request.types())); logger.warn("can't send mapping refresh for [{}], no master known.", request.index());
return; return;
} }
transportService.sendRequest(nodes.masterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); transportService.sendRequest(nodes.masterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
@ -67,7 +66,7 @@ public class NodeMappingRefreshAction extends AbstractComponent {
@Override @Override
public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception { public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception {
metaDataMappingService.refreshMapping(request.index(), request.indexUUID(), request.types()); metaDataMappingService.refreshMapping(request.index(), request.indexUUID());
channel.sendResponse(TransportResponse.Empty.INSTANCE); channel.sendResponse(TransportResponse.Empty.INSTANCE);
} }
} }
@ -76,16 +75,14 @@ public class NodeMappingRefreshAction extends AbstractComponent {
private String index; private String index;
private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE; private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
private String[] types;
private String nodeId; private String nodeId;
public NodeMappingRefreshRequest() { public NodeMappingRefreshRequest() {
} }
public NodeMappingRefreshRequest(String index, String indexUUID, String[] types, String nodeId) { public NodeMappingRefreshRequest(String index, String indexUUID, String nodeId) {
this.index = index; this.index = index;
this.indexUUID = indexUUID; this.indexUUID = indexUUID;
this.types = types;
this.nodeId = nodeId; this.nodeId = nodeId;
} }
@ -107,11 +104,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
return indexUUID; return indexUUID;
} }
public String[] types() {
return types;
}
public String nodeId() { public String nodeId() {
return nodeId; return nodeId;
} }
@ -120,7 +112,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out); super.writeTo(out);
out.writeString(index); out.writeString(index);
out.writeStringArray(types);
out.writeString(nodeId); out.writeString(nodeId);
out.writeString(indexUUID); out.writeString(indexUUID);
} }
@ -129,7 +120,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) throws IOException {
super.readFrom(in); super.readFrom(in);
index = in.readString(); index = in.readString();
types = in.readStringArray();
nodeId = in.readString(); nodeId = in.readString();
indexUUID = in.readString(); indexUUID = in.readString();
} }
View File
@ -20,9 +20,7 @@
package org.elasticsearch.cluster.action.shard; package org.elasticsearch.cluster.action.shard;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingService; import org.elasticsearch.cluster.routing.RoutingService;
@ -38,15 +36,12 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*; import org.elasticsearch.transport.*;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry; import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
@ -63,9 +58,6 @@ public class ShardStateAction extends AbstractComponent {
private final AllocationService allocationService; private final AllocationService allocationService;
private final RoutingService routingService; private final RoutingService routingService;
private final BlockingQueue<ShardRoutingEntry> startedShardsQueue = ConcurrentCollections.newBlockingQueue();
private final BlockingQueue<ShardRoutingEntry> failedShardQueue = ConcurrentCollections.newBlockingQueue();
@Inject @Inject
public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService, public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService,
AllocationService allocationService, RoutingService routingService) { AllocationService allocationService, RoutingService routingService) {
@ -141,104 +133,94 @@ public class ShardStateAction extends AbstractComponent {
}); });
} }
private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) { private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
failedShardQueue.add(shardRoutingEntry); clusterService.submitStateUpdateTask(
clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", Priority.HIGH, new ClusterStateUpdateTask() { "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
shardRoutingEntry,
ClusterStateTaskConfig.build(Priority.HIGH),
shardFailedClusterStateHandler,
shardFailedClusterStateHandler);
}
@Override class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
public ClusterState execute(ClusterState currentState) { @Override
if (shardRoutingEntry.processed) { public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
return currentState; BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
} List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
for (ShardRoutingEntry task : tasks) {
List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<>(); shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
failedShardQueue.drainTo(shardRoutingEntries);
// nothing to process (a previous event has processed it already)
if (shardRoutingEntries.isEmpty()) {
return currentState;
}
List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(shardRoutingEntries.size());
// mark all entries as processed
for (ShardRoutingEntry entry : shardRoutingEntries) {
entry.processed = true;
shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(entry.shardRouting, entry.message, entry.failure));
}
RoutingAllocation.Result routingResult = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
if (!routingResult.changed()) {
return currentState;
}
return ClusterState.builder(currentState).routingResult(routingResult).build();
} }
ClusterState maybeUpdatedState = currentState;
@Override try {
public void onFailure(String source, Throwable t) { RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
logger.error("unexpected failure during [{}]", t, source); if (result.changed()) {
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
}
batchResultBuilder.successes(tasks);
} catch (Throwable t) {
batchResultBuilder.failures(tasks, t);
} }
return batchResultBuilder.build(maybeUpdatedState);
}
@Override @Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) { if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) {
logger.trace("unassigned shards after shard failures. scheduling a reroute."); logger.trace("unassigned shards after shard failures. scheduling a reroute.");
routingService.reroute("unassigned shards after shard failures, scheduling a reroute"); routingService.reroute("unassigned shards after shard failures, scheduling a reroute");
} }
} }
});
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
} }
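// Illustration: the old implementation queued entries and drained the queue
// inside each cluster-state update; with ClusterStateTaskExecutor the cluster
// service itself batches every pending ShardRoutingEntry and hands the whole
// list to one execute() call. A toy, self-contained sketch of that batching
// contract (the interface and names here are illustrative, not the
// Elasticsearch ones):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BatchedExecutorSketch {
    interface BatchExecutor<S, T> {
        S execute(S currentState, List<T> tasks); // one call per batch of tasks
    }

    public static void main(String[] args) {
        // "State" is just the list of started shards; each task names a failed shard.
        BatchExecutor<List<String>, String> failedShardExecutor = (state, tasks) -> {
            List<String> updated = new ArrayList<>(state);
            updated.removeAll(tasks); // apply every queued failure in a single pass
            return updated;
        };

        List<String> started = Arrays.asList("shard-0", "shard-1", "shard-2");
        List<String> batchedFailures = Arrays.asList("shard-1", "shard-2");
        System.out.println(failedShardExecutor.execute(started, batchedFailures)); // prints: [shard-0]
    }
}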
private final ShardStartedClusterStateHandler shardStartedClusterStateHandler =
new ShardStartedClusterStateHandler();
private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) { private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
logger.debug("received shard started for {}", shardRoutingEntry); logger.debug("received shard started for {}", shardRoutingEntry);
// buffer shard started requests, and the state update tasks will simply drain it
// this is to optimize the number of "started" events we generate, and batch them
// possibly, we can do time based batching as well, but usually, we would want to
// process started events as fast as possible, to make shards available
startedShardsQueue.add(shardRoutingEntry);
clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", Priority.URGENT, clusterService.submitStateUpdateTask(
new ClusterStateUpdateTask() { "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
@Override shardRoutingEntry,
public ClusterState execute(ClusterState currentState) { ClusterStateTaskConfig.build(Priority.URGENT),
shardStartedClusterStateHandler,
shardStartedClusterStateHandler);
}
if (shardRoutingEntry.processed) { class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
return currentState; @Override
} public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
List<ShardRouting> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
for (ShardRoutingEntry task : tasks) {
shardRoutingsToBeApplied.add(task.shardRouting);
}
ClusterState maybeUpdatedState = currentState;
try {
RoutingAllocation.Result result =
allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied, true);
if (result.changed()) {
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
}
builder.successes(tasks);
} catch (Throwable t) {
builder.failures(tasks, t);
}
List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<>(); return builder.build(maybeUpdatedState);
startedShardsQueue.drainTo(shardRoutingEntries); }
// nothing to process (a previous event has processed it already) @Override
if (shardRoutingEntries.isEmpty()) { public void onFailure(String source, Throwable t) {
return currentState; logger.error("unexpected failure during [{}]", t, source);
} }
List<ShardRouting> shardRoutingToBeApplied = new ArrayList<>(shardRoutingEntries.size());
// mark all entries as processed
for (ShardRoutingEntry entry : shardRoutingEntries) {
entry.processed = true;
shardRoutingToBeApplied.add(entry.shardRouting);
}
if (shardRoutingToBeApplied.isEmpty()) {
return currentState;
}
RoutingAllocation.Result routingResult = allocationService.applyStartedShards(currentState, shardRoutingToBeApplied, true);
if (!routingResult.changed()) {
return currentState;
}
return ClusterState.builder(currentState).routingResult(routingResult).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
});
} }
private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> { private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
@ -266,8 +248,6 @@ public class ShardStateAction extends AbstractComponent {
String message; String message;
Throwable failure; Throwable failure;
volatile boolean processed; // state field, no need to serialize
public ShardRoutingEntry() { public ShardRoutingEntry() {
} }
View File
@ -19,6 +19,7 @@
package org.elasticsearch.cluster.metadata; package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.Version; import org.elasticsearch.Version;
@ -30,6 +31,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.node.DiscoveryNodeFilters; import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.compress.CompressedXContent;
@ -46,10 +48,13 @@ import org.joda.time.DateTimeZone;
import java.io.IOException; import java.io.IOException;
import java.text.ParseException; import java.text.ParseException;
import java.util.Collections;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale; import java.util.Locale;
import java.util.Map; import java.util.Map;
import java.util.Set;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
@ -168,6 +173,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node"; public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
public static final String INDEX_UUID_NA_VALUE = "_na_"; public static final String INDEX_UUID_NA_VALUE = "_na_";
public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
private final int numberOfShards; private final int numberOfShards;
private final int numberOfReplicas; private final int numberOfReplicas;
@ -184,6 +191,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final ImmutableOpenMap<String, Custom> customs; private final ImmutableOpenMap<String, Custom> customs;
private final ImmutableOpenIntMap<Set<String>> activeAllocationIds;
private transient final int totalNumberOfShards; private transient final int totalNumberOfShards;
private final DiscoveryNodeFilters requireFilters; private final DiscoveryNodeFilters requireFilters;
@ -194,65 +203,29 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final Version indexUpgradedVersion; private final Version indexUpgradedVersion;
private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion; private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
private IndexMetaData(String index, long version, State state, Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs) { private IndexMetaData(String index, long version, State state, int numberOfShards, int numberOfReplicas, Settings settings,
Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null); ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
if (maybeNumberOfShards == null) { ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> activeAllocationIds,
throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]"); DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
} Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion) {
int numberOfShards = maybeNumberOfShards;
if (numberOfShards <= 0) {
throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
}
Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
if (maybeNumberOfReplicas == null) {
throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
}
int numberOfReplicas = maybeNumberOfReplicas;
if (numberOfReplicas < 0) {
throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
}
this.index = index; this.index = index;
this.version = version; this.version = version;
this.state = state; this.state = state;
this.settings = settings;
this.mappings = mappings;
this.customs = customs;
this.numberOfShards = numberOfShards; this.numberOfShards = numberOfShards;
this.numberOfReplicas = numberOfReplicas; this.numberOfReplicas = numberOfReplicas;
this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1); this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1);
this.settings = settings;
this.mappings = mappings;
this.customs = customs;
this.aliases = aliases; this.aliases = aliases;
this.activeAllocationIds = activeAllocationIds;
Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap(); this.requireFilters = requireFilters;
if (requireMap.isEmpty()) { this.includeFilters = includeFilters;
requireFilters = null; this.excludeFilters = excludeFilters;
} else { this.indexCreatedVersion = indexCreatedVersion;
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap); this.indexUpgradedVersion = indexUpgradedVersion;
} this.minimumCompatibleLuceneVersion = minimumCompatibleLuceneVersion;
Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
if (includeMap.isEmpty()) {
includeFilters = null;
} else {
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
}
Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
if (excludeMap.isEmpty()) {
excludeFilters = null;
} else {
excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
}
indexCreatedVersion = Version.indexCreated(settings);
indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
if (stringLuceneVersion != null) {
try {
this.minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
} catch (ParseException ex) {
throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE +"] setting", ex);
}
} else {
this.minimumCompatibleLuceneVersion = null;
}
} }
public String getIndex() { public String getIndex() {
@ -364,6 +337,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return (T) customs.get(type); return (T) customs.get(type);
} }
public ImmutableOpenIntMap<Set<String>> getActiveAllocationIds() {
return activeAllocationIds;
}
public Set<String> activeAllocationIds(int shardId) {
assert shardId >= 0 && shardId < numberOfShards;
return activeAllocationIds.get(shardId);
}
@Nullable @Nullable
public DiscoveryNodeFilters requireFilters() { public DiscoveryNodeFilters requireFilters() {
return requireFilters; return requireFilters;
@ -408,6 +390,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
if (!customs.equals(that.customs)) { if (!customs.equals(that.customs)) {
return false; return false;
} }
if (!activeAllocationIds.equals(that.activeAllocationIds)) {
return false;
}
return true; return true;
} }
@ -418,6 +403,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
result = 31 * result + aliases.hashCode(); result = 31 * result + aliases.hashCode();
result = 31 * result + settings.hashCode(); result = 31 * result + settings.hashCode();
result = 31 * result + mappings.hashCode(); result = 31 * result + mappings.hashCode();
result = 31 * result + activeAllocationIds.hashCode();
return result; return result;
} }
@ -450,16 +436,19 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final Settings settings; private final Settings settings;
private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings; private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases; private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
private Diff<ImmutableOpenMap<String, Custom>> customs; private final Diff<ImmutableOpenMap<String, Custom>> customs;
private final Diff<ImmutableOpenIntMap<Set<String>>> activeAllocationIds;
public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) { public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
index = after.index; index = after.index;
version = after.version; version = after.version;
state = after.state; state = after.state;
settings = after.settings; settings = after.settings;
mappings = DiffableUtils.diff(before.mappings, after.mappings); mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
aliases = DiffableUtils.diff(before.aliases, after.aliases); aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
customs = DiffableUtils.diff(before.customs, after.customs); customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
activeAllocationIds = DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds,
DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
} }
public IndexMetaDataDiff(StreamInput in) throws IOException { public IndexMetaDataDiff(StreamInput in) throws IOException {
@ -467,19 +456,22 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
version = in.readLong(); version = in.readLong();
state = State.fromId(in.readByte()); state = State.fromId(in.readByte());
settings = Settings.readSettingsFromStream(in); settings = Settings.readSettingsFromStream(in);
mappings = DiffableUtils.readImmutableOpenMapDiff(in, MappingMetaData.PROTO); mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
aliases = DiffableUtils.readImmutableOpenMapDiff(in, AliasMetaData.PROTO); aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
customs = DiffableUtils.readImmutableOpenMapDiff(in, new DiffableUtils.KeyedReader<Custom>() { customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override @Override
public Custom readFrom(StreamInput in, String key) throws IOException { public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in); return lookupPrototypeSafe(key).readFrom(in);
} }
@Override @Override
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException { public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in); return lookupPrototypeSafe(key).readDiffFrom(in);
} }
}); });
activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
DiffableUtils.StringSetValueSerializer.getInstance());
} }
@Override @Override
@ -491,6 +483,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
mappings.writeTo(out); mappings.writeTo(out);
aliases.writeTo(out); aliases.writeTo(out);
customs.writeTo(out); customs.writeTo(out);
activeAllocationIds.writeTo(out);
} }
@Override @Override
@ -502,6 +495,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.mappings.putAll(mappings.apply(part.mappings)); builder.mappings.putAll(mappings.apply(part.mappings));
builder.aliases.putAll(aliases.apply(part.aliases)); builder.aliases.putAll(aliases.apply(part.aliases));
builder.customs.putAll(customs.apply(part.customs)); builder.customs.putAll(customs.apply(part.customs));
builder.activeAllocationIds.putAll(activeAllocationIds.apply(part.activeAllocationIds));
return builder.build(); return builder.build();
} }
} }
@ -528,6 +522,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData); builder.putCustom(type, customIndexMetaData);
} }
int activeAllocationIdsSize = in.readVInt();
for (int i = 0; i < activeAllocationIdsSize; i++) {
int key = in.readVInt();
Set<String> allocationIds = DiffableUtils.StringSetValueSerializer.getInstance().read(in, key);
builder.putActiveAllocationIds(key, allocationIds);
}
return builder.build(); return builder.build();
} }
@ -550,6 +550,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
out.writeString(cursor.key); out.writeString(cursor.key);
cursor.value.writeTo(out); cursor.value.writeTo(out);
} }
out.writeVInt(activeAllocationIds.size());
for (IntObjectCursor<Set<String>> cursor : activeAllocationIds) {
out.writeVInt(cursor.key);
DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.value, out);
}
} }
public static Builder builder(String index) { public static Builder builder(String index) {
@ -569,12 +574,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings; private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases; private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
private final ImmutableOpenMap.Builder<String, Custom> customs; private final ImmutableOpenMap.Builder<String, Custom> customs;
private final ImmutableOpenIntMap.Builder<Set<String>> activeAllocationIds;
public Builder(String index) { public Builder(String index) {
this.index = index; this.index = index;
this.mappings = ImmutableOpenMap.builder(); this.mappings = ImmutableOpenMap.builder();
this.aliases = ImmutableOpenMap.builder(); this.aliases = ImmutableOpenMap.builder();
this.customs = ImmutableOpenMap.builder(); this.customs = ImmutableOpenMap.builder();
this.activeAllocationIds = ImmutableOpenIntMap.builder();
} }
public Builder(IndexMetaData indexMetaData) { public Builder(IndexMetaData indexMetaData) {
@ -585,6 +592,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings); this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases); this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
this.customs = ImmutableOpenMap.builder(indexMetaData.customs); this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
this.activeAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.activeAllocationIds);
} }
public String index() { public String index() {
@ -693,6 +701,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
return this.customs.get(type); return this.customs.get(type);
} }
public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
activeAllocationIds.put(shardId, new HashSet<>(allocationIds));
return this;
}
public Set<String> getActiveAllocationIds(int shardId) {
return activeAllocationIds.get(shardId);
}
public long version() { public long version() {
return this.version; return this.version;
} }
@ -714,7 +731,72 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
} }
} }
return new IndexMetaData(index, version, state, tmpSettings, mappings.build(), tmpAliases.build(), customs.build()); Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
if (maybeNumberOfShards == null) {
throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
}
int numberOfShards = maybeNumberOfShards;
if (numberOfShards <= 0) {
throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
}
Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
if (maybeNumberOfReplicas == null) {
throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
}
int numberOfReplicas = maybeNumberOfReplicas;
if (numberOfReplicas < 0) {
throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
}
// fill missing slots in activeAllocationIds with empty set if needed and make all entries immutable
ImmutableOpenIntMap.Builder<Set<String>> filledActiveAllocationIds = ImmutableOpenIntMap.builder();
for (int i = 0; i < numberOfShards; i++) {
if (activeAllocationIds.containsKey(i)) {
filledActiveAllocationIds.put(i, Collections.unmodifiableSet(new HashSet<>(activeAllocationIds.get(i))));
} else {
filledActiveAllocationIds.put(i, Collections.emptySet());
}
}
Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
final DiscoveryNodeFilters requireFilters;
if (requireMap.isEmpty()) {
requireFilters = null;
} else {
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
}
Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
final DiscoveryNodeFilters includeFilters;
if (includeMap.isEmpty()) {
includeFilters = null;
} else {
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
}
Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
final DiscoveryNodeFilters excludeFilters;
if (excludeMap.isEmpty()) {
excludeFilters = null;
} else {
excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
}
Version indexCreatedVersion = Version.indexCreated(settings);
Version indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
if (stringLuceneVersion != null) {
try {
minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
} catch (ParseException ex) {
throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE +"] setting", ex);
}
} else {
minimumCompatibleLuceneVersion = null;
}
return new IndexMetaData(index, version, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
} }
public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
@ -757,6 +839,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
} }
builder.endObject(); builder.endObject();
builder.startObject(KEY_ACTIVE_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.activeAllocationIds) {
builder.startArray(String.valueOf(cursor.key));
for (String allocationId : cursor.value) {
builder.value(allocationId);
}
builder.endArray();
}
builder.endObject();
builder.endObject(); builder.endObject();
} }
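// Illustration: based on the builder calls above, the serialized index metadata
// gains an "active_allocations" object with one array of allocation ids per
// shard id, and the parsing hunk below reads exactly that shape back. A hedged
// sketch of the output using the same XContentBuilder calls;
// XContentFactory.jsonBuilder() and the sample allocation id are assumptions,
// not taken from this change:

XContentBuilder sample = XContentFactory.jsonBuilder();
sample.startObject();
sample.startObject(KEY_ACTIVE_ALLOCATIONS);
sample.startArray("0");
sample.value("some-allocation-id");   // illustrative id
sample.endArray();
sample.startArray("1");               // a shard with no active copies -> empty array
sample.endArray();
sample.endObject();
sample.endObject();
// resulting JSON: {"active_allocations":{"0":["some-allocation-id"],"1":[]}}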
@ -792,6 +883,21 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
while (parser.nextToken() != XContentParser.Token.END_OBJECT) { while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
builder.putAlias(AliasMetaData.Builder.fromXContent(parser)); builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
} }
} else if (KEY_ACTIVE_ALLOCATIONS.equals(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
String shardId = currentFieldName;
Set<String> allocationIds = new HashSet<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
allocationIds.add(parser.text());
}
}
builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
}
}
} else { } else {
// check if its a custom index metadata // check if its a custom index metadata
Custom proto = lookupPrototype(currentFieldName); Custom proto = lookupPrototype(currentFieldName);
View File
@ -27,7 +27,6 @@ import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable; import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlockLevel;
@ -41,6 +40,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.settings.loader.SettingsLoader;
@ -54,7 +54,6 @@ import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService; import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.search.warmer.IndexWarmersMetaData;
@ -640,9 +639,9 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
version = after.version; version = after.version;
transientSettings = after.transientSettings; transientSettings = after.transientSettings;
persistentSettings = after.persistentSettings; persistentSettings = after.persistentSettings;
indices = DiffableUtils.diff(before.indices, after.indices); indices = DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer());
templates = DiffableUtils.diff(before.templates, after.templates); templates = DiffableUtils.diff(before.templates, after.templates, DiffableUtils.getStringKeySerializer());
customs = DiffableUtils.diff(before.customs, after.customs); customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
} }
public MetaDataDiff(StreamInput in) throws IOException { public MetaDataDiff(StreamInput in) throws IOException {
@ -650,16 +649,17 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
version = in.readLong(); version = in.readLong();
transientSettings = Settings.readSettingsFromStream(in); transientSettings = Settings.readSettingsFromStream(in);
persistentSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in);
indices = DiffableUtils.readImmutableOpenMapDiff(in, IndexMetaData.PROTO); indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData.PROTO);
templates = DiffableUtils.readImmutableOpenMapDiff(in, IndexTemplateMetaData.PROTO); templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData.PROTO);
customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() { customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override @Override
public Custom readFrom(StreamInput in, String key) throws IOException { public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in); return lookupPrototypeSafe(key).readFrom(in);
} }
@Override @Override
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException { public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in); return lookupPrototypeSafe(key).readDiffFrom(in);
} }
}); });
@ -748,8 +748,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE,
RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC));
RecoverySettings.INDICES_RECOVERY_MAX_SIZE_PER_SEC));
/** All known time cluster settings. */ /** All known time cluster settings. */
@ -1029,12 +1028,18 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
for (ObjectObjectCursor<String, AliasMetaData> aliasCursor : indexMetaData.getAliases()) { for (ObjectObjectCursor<String, AliasMetaData> aliasCursor : indexMetaData.getAliases()) {
AliasMetaData aliasMetaData = aliasCursor.value; AliasMetaData aliasMetaData = aliasCursor.value;
AliasOrIndex.Alias aliasOrIndex = (AliasOrIndex.Alias) aliasAndIndexLookup.get(aliasMetaData.getAlias()); AliasOrIndex aliasOrIndex = aliasAndIndexLookup.get(aliasMetaData.getAlias());
if (aliasOrIndex == null) { if (aliasOrIndex == null) {
aliasOrIndex = new AliasOrIndex.Alias(aliasMetaData, indexMetaData); aliasOrIndex = new AliasOrIndex.Alias(aliasMetaData, indexMetaData);
aliasAndIndexLookup.put(aliasMetaData.getAlias(), aliasOrIndex); aliasAndIndexLookup.put(aliasMetaData.getAlias(), aliasOrIndex);
} else if (aliasOrIndex instanceof AliasOrIndex.Alias) {
AliasOrIndex.Alias alias = (AliasOrIndex.Alias) aliasOrIndex;
alias.addIndex(indexMetaData);
} else if (aliasOrIndex instanceof AliasOrIndex.Index) {
AliasOrIndex.Index index = (AliasOrIndex.Index) aliasOrIndex;
throw new IllegalStateException("index and alias names need to be unique, but alias [" + aliasMetaData.getAlias() + "] and index [" + index.getIndex().getIndex() + "] have the same name");
} else { } else {
aliasOrIndex.addIndex(indexMetaData); throw new IllegalStateException("unexpected alias [" + aliasMetaData.getAlias() + "][" + aliasOrIndex + "]");
} }
} }
} }
View File
@ -170,12 +170,12 @@ public class MetaDataCreateIndexService extends AbstractComponent {
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX); updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
request.settings(updatedSettingsBuilder.build()); request.settings(updatedSettingsBuilder.build());
clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) { clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]",
new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@Override @Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
return new ClusterStateUpdateResponse(acknowledged); return new ClusterStateUpdateResponse(acknowledged);
} }
@Override @Override
public ClusterState execute(ClusterState currentState) throws Exception { public ClusterState execute(ClusterState currentState) throws Exception {
@ -299,7 +299,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
// Set up everything, now locally create the index to see that things are ok, and apply // Set up everything, now locally create the index to see that things are ok, and apply
final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build(); final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build();
// create the index here (on the master) to validate it can be created, as well as adding the mapping // create the index here (on the master) to validate it can be created, as well as adding the mapping
indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.EMPTY_LIST); indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
indexCreated = true; indexCreated = true;
// now add the mappings // now add the mappings
IndexService indexService = indicesService.indexServiceSafe(request.index()); IndexService indexService = indicesService.indexServiceSafe(request.index());
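A mechanical cleanup that recurs throughout this commit appears in this hunk as well: the raw `Collections.EMPTY_LIST`/`EMPTY_MAP` constants are replaced by the generic `Collections.emptyList()`/`emptyMap()` factories. A minimal plain-JDK illustration of the difference (nothing Elasticsearch-specific):

---------------------------------------------------------------------------
import java.util.Collections;
import java.util.List;

class EmptyCollectionsSketch {
    @SuppressWarnings("unchecked")
    static List<String> raw() {
        // Collections.EMPTY_LIST is a raw List; assigning it to List<String> needs an unchecked cast.
        return (List<String>) Collections.EMPTY_LIST;
    }

    static List<String> typed() {
        // Collections.emptyList() infers List<String> from the target type: no cast, no warning.
        return Collections.emptyList();
    }

    public static void main(String[] args) {
        System.out.println(raw().isEmpty() + " " + typed().isEmpty()); // true true
    }
}
---------------------------------------------------------------------------

The factory methods infer the element type from the assignment context, so the unchecked cast and its warning disappear.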
@ -39,7 +39,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Locale;
import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
@ -71,7 +70,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
Collection<String> indices = Arrays.asList(request.indices); Collection<String> indices = Arrays.asList(request.indices);
final DeleteIndexListener listener = new DeleteIndexListener(userListener); final DeleteIndexListener listener = new DeleteIndexListener(userListener);
clusterService.submitStateUpdateTask("delete-index " + indices, Priority.URGENT, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) {
@Override @Override
public TimeValue timeout() { public TimeValue timeout() {
@ -62,7 +62,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
} }
public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) { public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
clusterService.submitStateUpdateTask("index-aliases", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) { clusterService.submitStateUpdateTask("index-aliases", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@Override @Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
return new ClusterStateUpdateResponse(acknowledged); return new ClusterStateUpdateResponse(acknowledged);
@ -99,7 +99,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
if (indexService == null) { if (indexService == null) {
// temporarily create the index and add mappings so we can parse the filter // temporarily create the index and add mappings so we can parse the filter
try { try {
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST); indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) { if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, false); indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
} }
@ -76,7 +76,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
} }
final String indicesAsString = Arrays.toString(request.indices()); final String indicesAsString = Arrays.toString(request.indices());
clusterService.submitStateUpdateTask("close-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) { clusterService.submitStateUpdateTask("close-indices " + indicesAsString, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@Override @Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
return new ClusterStateUpdateResponse(acknowledged); return new ClusterStateUpdateResponse(acknowledged);
@ -140,7 +140,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
} }
final String indicesAsString = Arrays.toString(request.indices()); final String indicesAsString = Arrays.toString(request.indices());
clusterService.submitStateUpdateTask("open-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) { clusterService.submitStateUpdateTask("open-indices " + indicesAsString, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@Override @Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
return new ClusterStateUpdateResponse(acknowledged); return new ClusterStateUpdateResponse(acknowledged);
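The other refactor repeated across these service classes is visible here: the `Priority` no longer travels as a separate argument to `submitStateUpdateTask` but is handed to the update task's constructor, so each task carries its own priority. A stripped-down sketch of that shape, using hypothetical `Service`/`UpdateTask` types rather than the real cluster-service API:

---------------------------------------------------------------------------
// Hypothetical Service/UpdateTask types; only the shape of the refactor is shown.
class PrioritySketch {
    enum Priority { NORMAL, HIGH, URGENT }

    abstract static class UpdateTask {
        final Priority priority;
        UpdateTask(Priority priority) { this.priority = priority; }
        abstract void execute();
    }

    static class Service {
        // Before the refactor the priority was a separate submit(...) argument; now the task carries it.
        void submit(String source, UpdateTask task) {
            System.out.println(source + " queued at priority " + task.priority);
            task.execute();
        }
    }

    public static void main(String[] args) {
        new Service().submit("delete-index [logs]", new UpdateTask(Priority.URGENT) {
            @Override
            void execute() { /* mutate the state here */ }
        });
    }
}
---------------------------------------------------------------------------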
@ -56,7 +56,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
} }
public void removeTemplates(final RemoveRequest request, final RemoveListener listener) { public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {
clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", Priority.URGENT, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", new ClusterStateUpdateTask(Priority.URGENT) {
@Override @Override
public TimeValue timeout() { public TimeValue timeout() {
@ -143,7 +143,8 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
} }
final IndexTemplateMetaData template = templateBuilder.build(); final IndexTemplateMetaData template = templateBuilder.build();
clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", Priority.URGENT, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]",
new ClusterStateUpdateTask(Priority.URGENT) {
@Override @Override
public TimeValue timeout() { public TimeValue timeout() {
@ -216,6 +217,9 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
for (Alias alias : request.aliases) { for (Alias alias : request.aliases) {
//we validate the alias only partially, as we don't know yet to which index it'll get applied to //we validate the alias only partially, as we don't know yet to which index it'll get applied to
aliasValidator.validateAliasStandalone(alias); aliasValidator.validateAliasStandalone(alias);
if (request.template.equals(alias.name())) {
throw new IllegalArgumentException("Alias [" + alias.name() + "] cannot be the same as the template pattern [" + request.template + "]");
}
} }
} }
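The added guard rejects a template whose alias reuses the template's own index pattern as its name. A tiny sketch of that check in isolation (names are made up):

---------------------------------------------------------------------------
// Names are made up; only the added equality guard is shown.
class TemplateAliasCheckSketch {
    static void validate(String templatePattern, String aliasName) {
        if (templatePattern.equals(aliasName)) {
            throw new IllegalArgumentException(
                    "Alias [" + aliasName + "] cannot be the same as the template pattern [" + templatePattern + "]");
        }
    }

    public static void main(String[] args) {
        validate("logs-*", "logs-all");   // fine
        validate("logs-*", "logs-*");     // throws IllegalArgumentException
    }
}
---------------------------------------------------------------------------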
@ -218,8 +218,8 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
try { try {
// We cannot instantiate real analysis server at this point because the node might not have // We cannot instantiate real analysis server at this point because the node might not have
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it // been started yet. However, we don't really need real analyzers at this stage - so we can fake it
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.EMPTY_LIST); IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList());
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.EMPTY_MAP); SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) { try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry)) { try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry)) {
@ -256,7 +256,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
}; };
public FakeAnalysisService(IndexSettings indexSettings) { public FakeAnalysisService(IndexSettings indexSettings) {
super(indexSettings, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP); super(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
} }
@Override @Override
@ -22,28 +22,27 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority; import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.MergeMappingException;
import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.indices.InvalidTypeNameException;
import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.percolator.PercolatorService;
import java.io.IOException;
import java.util.*; import java.util.*;
/** /**
* Service responsible for submitting mapping changes * Service responsible for submitting mapping changes
@ -53,13 +52,11 @@ public class MetaDataMappingService extends AbstractComponent {
private final ClusterService clusterService; private final ClusterService clusterService;
private final IndicesService indicesService; private final IndicesService indicesService;
// the mutex protect all the refreshOrUpdate variables! final ClusterStateTaskExecutor<RefreshTask> refreshExecutor = new RefreshTaskExecutor();
private final Object refreshOrUpdateMutex = new Object(); final ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> putMappingExecutor = new PutMappingExecutor();
private final List<MappingTask> refreshOrUpdateQueue = new ArrayList<>();
private long refreshOrUpdateInsertOrder;
private long refreshOrUpdateProcessedInsertOrder;
private final NodeServicesProvider nodeServicesProvider; private final NodeServicesProvider nodeServicesProvider;
@Inject @Inject
public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) { public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) {
super(settings); super(settings);
@ -68,89 +65,44 @@ public class MetaDataMappingService extends AbstractComponent {
this.nodeServicesProvider = nodeServicesProvider; this.nodeServicesProvider = nodeServicesProvider;
} }
static class MappingTask { static class RefreshTask {
final String index; final String index;
final String indexUUID; final String indexUUID;
MappingTask(String index, final String indexUUID) { RefreshTask(String index, final String indexUUID) {
this.index = index; this.index = index;
this.indexUUID = indexUUID; this.indexUUID = indexUUID;
} }
} }
static class RefreshTask extends MappingTask { class RefreshTaskExecutor implements ClusterStateTaskExecutor<RefreshTask> {
final String[] types; @Override
public BatchResult<RefreshTask> execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception {
RefreshTask(String index, final String indexUUID, String[] types) { ClusterState newClusterState = executeRefresh(currentState, tasks);
super(index, indexUUID); return BatchResult.<RefreshTask>builder().successes(tasks).build(newClusterState);
this.types = types;
}
}
static class UpdateTask extends MappingTask {
final String type;
final CompressedXContent mappingSource;
final String nodeId; // null for unknown
final ActionListener<ClusterStateUpdateResponse> listener;
UpdateTask(String index, String indexUUID, String type, CompressedXContent mappingSource, String nodeId, ActionListener<ClusterStateUpdateResponse> listener) {
super(index, indexUUID);
this.type = type;
this.mappingSource = mappingSource;
this.nodeId = nodeId;
this.listener = listener;
} }
} }
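The heart of this hunk is the replacement of the hand-rolled queue, mutex and insert-order counters with a `ClusterStateTaskExecutor`: callers submit individual tasks, and the executor later receives the whole pending batch and turns it into a single new state. A framework-free sketch of that idea, with made-up `Executor`/state types instead of the real Elasticsearch interfaces:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;

// Made-up batching executor: submitted tasks queue up and are later applied to one state in a single pass.
class BatchExecutorSketch<S, T> {
    interface Executor<A, B> { A executeBatch(A currentState, List<B> tasks); }

    private final List<T> queue = new ArrayList<>();
    private final Executor<S, T> executor;

    BatchExecutorSketch(Executor<S, T> executor) { this.executor = executor; }

    synchronized void submit(T task) { queue.add(task); }

    // Drain everything queued so far and hand the whole batch to the executor at once.
    synchronized S drainAndApply(S currentState) {
        List<T> batch = new ArrayList<>(queue);
        queue.clear();
        return batch.isEmpty() ? currentState : executor.executeBatch(currentState, batch);
    }

    public static void main(String[] args) {
        BatchExecutorSketch<String, String> queue =
                new BatchExecutorSketch<>((state, tasks) -> state + " +" + tasks.size() + " tasks");
        queue.submit("refresh-mapping [logs]");
        queue.submit("refresh-mapping [metrics]");
        System.out.println(queue.drainAndApply("state-0")); // state-0 +2 tasks
    }
}
---------------------------------------------------------------------------

Because the executor sees the whole batch at once, duplicate work (for example several refreshes of the same index) can be collapsed into one state change instead of one per task.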
/** /**
* Batch method to apply all the queued refresh or update operations. The idea is to try and batch as much * Batch method to apply all the queued refresh operations. The idea is to try and batch as much
* as possible so we won't create the same index all the time for example for the updates on the same mapping * as possible so we won't create the same index all the time for example for the updates on the same mapping
* and generate a single cluster change event out of all of those. * and generate a single cluster change event out of all of those.
*/ */
Tuple<ClusterState, List<MappingTask>> executeRefreshOrUpdate(final ClusterState currentState, final long insertionOrder) throws Exception { ClusterState executeRefresh(final ClusterState currentState, final List<RefreshTask> allTasks) throws Exception {
final List<MappingTask> allTasks = new ArrayList<>();
synchronized (refreshOrUpdateMutex) {
if (refreshOrUpdateQueue.isEmpty()) {
return Tuple.tuple(currentState, allTasks);
}
// we already processed this task in a bulk manner in a previous cluster event, simply ignore
// it so we will let other tasks get in and processed ones, we will handle the queued ones
// later on in a subsequent cluster state event
if (insertionOrder < refreshOrUpdateProcessedInsertOrder) {
return Tuple.tuple(currentState, allTasks);
}
allTasks.addAll(refreshOrUpdateQueue);
refreshOrUpdateQueue.clear();
refreshOrUpdateProcessedInsertOrder = refreshOrUpdateInsertOrder;
}
if (allTasks.isEmpty()) {
return Tuple.tuple(currentState, allTasks);
}
// break down to tasks per index, so we can optimize the on demand index service creation // break down to tasks per index, so we can optimize the on demand index service creation
// to only happen for the duration of a single index processing of its respective events // to only happen for the duration of a single index processing of its respective events
Map<String, List<MappingTask>> tasksPerIndex = new HashMap<>(); Map<String, List<RefreshTask>> tasksPerIndex = new HashMap<>();
for (MappingTask task : allTasks) { for (RefreshTask task : allTasks) {
if (task.index == null) { if (task.index == null) {
logger.debug("ignoring a mapping task of type [{}] with a null index.", task); logger.debug("ignoring a mapping task of type [{}] with a null index.", task);
} }
List<MappingTask> indexTasks = tasksPerIndex.get(task.index); tasksPerIndex.computeIfAbsent(task.index, k -> new ArrayList<>()).add(task);
if (indexTasks == null) {
indexTasks = new ArrayList<>();
tasksPerIndex.put(task.index, indexTasks);
}
indexTasks.add(task);
} }
boolean dirty = false; boolean dirty = false;
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
for (Map.Entry<String, List<MappingTask>> entry : tasksPerIndex.entrySet()) { for (Map.Entry<String, List<RefreshTask>> entry : tasksPerIndex.entrySet()) {
String index = entry.getKey(); String index = entry.getKey();
IndexMetaData indexMetaData = mdBuilder.get(index); IndexMetaData indexMetaData = mdBuilder.get(index);
if (indexMetaData == null) { if (indexMetaData == null) {
@ -160,14 +112,17 @@ public class MetaDataMappingService extends AbstractComponent {
} }
// the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep // the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep
// the latest (based on order) update mapping one per node // the latest (based on order) update mapping one per node
List<MappingTask> allIndexTasks = entry.getValue(); List<RefreshTask> allIndexTasks = entry.getValue();
List<MappingTask> tasks = new ArrayList<>(); boolean hasTaskWithRightUUID = false;
for (MappingTask task : allIndexTasks) { for (RefreshTask task : allIndexTasks) {
if (!indexMetaData.isSameUUID(task.indexUUID)) { if (indexMetaData.isSameUUID(task.indexUUID)) {
hasTaskWithRightUUID = true;
} else {
logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task); logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
continue;
} }
tasks.add(task); }
if (hasTaskWithRightUUID == false) {
continue;
} }
// construct the actual index if needed, and make sure the relevant mappings are there // construct the actual index if needed, and make sure the relevant mappings are there
@ -175,28 +130,17 @@ public class MetaDataMappingService extends AbstractComponent {
IndexService indexService = indicesService.indexService(index); IndexService indexService = indicesService.indexService(index);
if (indexService == null) { if (indexService == null) {
// we need to create the index here, and add the current mapping to it, so we can merge // we need to create the index here, and add the current mapping to it, so we can merge
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST); indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
removeIndex = true; removeIndex = true;
Set<String> typesToIntroduce = new HashSet<>(); for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
for (MappingTask task : tasks) { // don't apply the default mapping, it has been applied when the mapping was created
if (task instanceof UpdateTask) { indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), false, true);
typesToIntroduce.add(((UpdateTask) task).type);
} else if (task instanceof RefreshTask) {
Collections.addAll(typesToIntroduce, ((RefreshTask) task).types);
}
}
for (String type : typesToIntroduce) {
// only add the current relevant mapping (if exists)
if (indexMetaData.getMappings().containsKey(type)) {
// don't apply the default mapping, it has been applied when the mapping was created
indexService.mapperService().merge(type, indexMetaData.getMappings().get(type).source(), false, true);
}
} }
} }
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData); IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
try { try {
boolean indexDirty = processIndexMappingTasks(tasks, indexService, builder); boolean indexDirty = refreshIndexMapping(indexService, builder);
if (indexDirty) { if (indexDirty) {
mdBuilder.put(builder); mdBuilder.put(builder);
dirty = true; dirty = true;
@ -209,81 +153,33 @@ public class MetaDataMappingService extends AbstractComponent {
} }
if (!dirty) { if (!dirty) {
return Tuple.tuple(currentState, allTasks); return currentState;
} }
return Tuple.tuple(ClusterState.builder(currentState).metaData(mdBuilder).build(), allTasks); return ClusterState.builder(currentState).metaData(mdBuilder).build();
} }
private boolean processIndexMappingTasks(List<MappingTask> tasks, IndexService indexService, IndexMetaData.Builder builder) { private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Builder builder) {
boolean dirty = false; boolean dirty = false;
String index = indexService.index().name(); String index = indexService.index().name();
// keep track of what we already refreshed, no need to refresh it again... try {
Set<String> processedRefreshes = new HashSet<>(); List<String> updatedTypes = new ArrayList<>();
for (MappingTask task : tasks) { for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
if (task instanceof RefreshTask) { final String type = mapper.type();
RefreshTask refreshTask = (RefreshTask) task; if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
try { updatedTypes.add(type);
List<String> updatedTypes = new ArrayList<>();
for (String type : refreshTask.types) {
if (processedRefreshes.contains(type)) {
continue;
}
DocumentMapper mapper = indexService.mapperService().documentMapper(type);
if (mapper == null) {
continue;
}
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
updatedTypes.add(type);
builder.putMapping(new MappingMetaData(mapper));
}
processedRefreshes.add(type);
}
if (updatedTypes.isEmpty()) {
continue;
}
logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
dirty = true;
} catch (Throwable t) {
logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types);
} }
} else if (task instanceof UpdateTask) {
UpdateTask updateTask = (UpdateTask) task;
try {
String type = updateTask.type;
CompressedXContent mappingSource = updateTask.mappingSource;
MappingMetaData mappingMetaData = builder.mapping(type);
if (mappingMetaData != null && mappingMetaData.source().equals(mappingSource)) {
logger.debug("[{}] update_mapping [{}] ignoring mapping update task as its source is equal to ours", index, updateTask.type);
continue;
}
DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource, false, true);
processedRefreshes.add(type);
// if we end up with the same mapping as the original once, ignore
if (mappingMetaData != null && mappingMetaData.source().equals(updatedMapper.mappingSource())) {
logger.debug("[{}] update_mapping [{}] ignoring mapping update task as it results in the same source as what we have", index, updateTask.type);
continue;
}
// build the updated mapping source
if (logger.isDebugEnabled()) {
logger.debug("[{}] update_mapping [{}] (dynamic) with source [{}]", index, type, updatedMapper.mappingSource());
} else if (logger.isInfoEnabled()) {
logger.info("[{}] update_mapping [{}] (dynamic)", index, type);
}
builder.putMapping(new MappingMetaData(updatedMapper));
dirty = true;
} catch (Throwable t) {
logger.warn("[{}] failed to update-mapping in cluster state, type [{}]", index, updateTask.type);
}
} else {
logger.warn("illegal state, got wrong mapping task type [{}]", task);
} }
// if a single type is not up-to-date, re-send everything
if (updatedTypes.isEmpty() == false) {
logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes);
dirty = true;
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
builder.putMapping(new MappingMetaData(mapper));
}
}
} catch (Throwable t) {
logger.warn("[{}] failed to refresh-mapping in cluster state", t, index);
} }
return dirty; return dirty;
} }
@ -291,198 +187,198 @@ public class MetaDataMappingService extends AbstractComponent {
/** /**
* Refreshes mappings if they are not the same between original and parsed version * Refreshes mappings if they are not the same between original and parsed version
*/ */
public void refreshMapping(final String index, final String indexUUID, final String... types) { public void refreshMapping(final String index, final String indexUUID) {
final long insertOrder; final RefreshTask refreshTask = new RefreshTask(index, indexUUID);
synchronized (refreshOrUpdateMutex) { clusterService.submitStateUpdateTask("refresh-mapping [" + index + "]",
insertOrder = ++refreshOrUpdateInsertOrder; refreshTask,
refreshOrUpdateQueue.add(new RefreshTask(index, indexUUID, types)); ClusterStateTaskConfig.build(Priority.HIGH),
} refreshExecutor,
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", Priority.HIGH, new ClusterStateUpdateTask() { (source, t) -> logger.warn("failure during [{}]", t, source)
private volatile List<MappingTask> allTasks; );
}
@Override class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
public void onFailure(String source, Throwable t) { @Override
logger.warn("failure during [{}]", t, source); public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
} Set<String> indicesToClose = new HashSet<>();
BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
@Override try {
public ClusterState execute(ClusterState currentState) throws Exception { // precreate incoming indices;
Tuple<ClusterState, List<MappingTask>> tuple = executeRefreshOrUpdate(currentState, insertOrder); for (PutMappingClusterStateUpdateRequest request : tasks) {
this.allTasks = tuple.v2(); // failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up
return tuple.v1(); for (String index : request.indices()) {
} final IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData != null && indicesService.hasIndex(index) == false) {
@Override // if we don't have the index, we will throw exceptions later;
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { indicesToClose.add(index);
if (allTasks == null) { IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
return; // add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
}
}
}
} }
for (Object task : allTasks) { for (PutMappingClusterStateUpdateRequest request : tasks) {
if (task instanceof UpdateTask) { try {
UpdateTask uTask = (UpdateTask) task; currentState = applyRequest(currentState, request);
ClusterStateUpdateResponse response = new ClusterStateUpdateResponse(true); builder.success(request);
uTask.listener.onResponse(response); } catch (Throwable t) {
builder.failure(request, t);
}
}
return builder.build(currentState);
} finally {
for (String index : indicesToClose) {
indicesService.removeIndex(index, "created for mapping processing");
}
}
}
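The `execute` method above accounts for each request individually: requests that apply cleanly are recorded as successes, requests that throw become failures, and the indices created only for mapping processing are removed in the `finally` block either way. A small sketch of that per-task accounting, using a hypothetical `Outcome` holder (Java records for brevity) rather than the real `BatchResult`:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.UnaryOperator;

// Hypothetical per-task accounting: apply each task in order, keep going on failure,
// and report which tasks succeeded and which threw.
class PerTaskResultSketch {
    record Outcome<S>(S finalState, List<String> successes, Map<String, Throwable> failures) {}

    static <S> Outcome<S> applyAll(S state, Map<String, UnaryOperator<S>> tasks) {
        List<String> successes = new ArrayList<>();
        Map<String, Throwable> failures = new LinkedHashMap<>();
        for (Map.Entry<String, UnaryOperator<S>> task : tasks.entrySet()) {
            try {
                state = task.getValue().apply(state);   // state only advances when the task succeeds
                successes.add(task.getKey());
            } catch (RuntimeException e) {
                failures.put(task.getKey(), e);
            }
        }
        return new Outcome<>(state, successes, failures);
    }

    public static void main(String[] args) {
        Map<String, UnaryOperator<Integer>> tasks = new LinkedHashMap<>();
        tasks.put("put-mapping [a]", s -> s + 1);
        tasks.put("put-mapping [bad]", s -> { throw new IllegalArgumentException("merge conflict"); });
        tasks.put("put-mapping [b]", s -> s + 1);
        Outcome<Integer> outcome = applyAll(0, tasks);
        // prints: 2 [put-mapping [a], put-mapping [b]] [put-mapping [bad]]
        System.out.println(outcome.finalState() + " " + outcome.successes() + " " + outcome.failures().keySet());
    }
}
---------------------------------------------------------------------------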
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
Map<String, DocumentMapper> newMappers = new HashMap<>();
Map<String, DocumentMapper> existingMappers = new HashMap<>();
for (String index : request.indices()) {
IndexService indexService = indicesService.indexServiceSafe(index);
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
DocumentMapper newMapper;
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
} else {
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
if (existingMapper != null) {
// first, simulate
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
// if we have conflicts, throw an exception
if (mergeResult.hasConflicts()) {
throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}");
}
} else {
// TODO: can we find a better place for this validation?
// The reason this validation is here is that the mapper service doesn't learn about
// new types all at once, which can create a false error.
// For example in MapperService we can't distinguish between a create index api call
// and a put mapping api call, so we don't know which type did exist before.
// Also the order of the mappings may be backwards.
if (newMapper.parentFieldMapper().active()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
}
}
}
}
}
newMappers.put(index, newMapper);
if (existingMapper != null) {
existingMappers.put(index, existingMapper);
}
}
String mappingType = request.type();
if (mappingType == null) {
mappingType = newMappers.values().iterator().next().type();
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
}
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
}
final Map<String, MappingMetaData> mappings = new HashMap<>();
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
String index = entry.getKey();
// do the actual merge here on the master, and update the mapping source
DocumentMapper newMapper = entry.getValue();
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
continue;
}
CompressedXContent existingSource = null;
if (existingMappers.containsKey(entry.getKey())) {
existingSource = existingMappers.get(entry.getKey()).mappingSource();
}
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {
if (existingSource.equals(updatedSource)) {
// same source, no changes, ignore it
} else {
// use the merged mapping source
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
}
}
} else {
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
} }
} }
} }
}); if (mappings.isEmpty()) {
// no changes, return
return currentState;
}
MetaData.Builder builder = MetaData.builder(currentState.metaData());
for (String indexName : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
if (indexMetaData == null) {
throw new IndexNotFoundException(indexName);
}
MappingMetaData mappingMd = mappings.get(indexName);
if (mappingMd != null) {
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
}
}
return ClusterState.builder(currentState).metaData(builder).build();
}
} }
public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) { public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]",
request,
ClusterStateTaskConfig.build(Priority.HIGH, request.masterNodeTimeout()),
putMappingExecutor,
new AckedClusterStateTaskListener() {
clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) { @Override
public void onFailure(String source, Throwable t) {
@Override listener.onFailure(t);
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
return new ClusterStateUpdateResponse(acknowledged);
}
@Override
public ClusterState execute(final ClusterState currentState) throws Exception {
List<String> indicesToClose = new ArrayList<>();
try {
for (String index : request.indices()) {
if (!currentState.metaData().hasIndex(index)) {
throw new IndexNotFoundException(index);
}
} }
// pre create indices here and add mappings to them so we can merge the mappings here if needed @Override
for (String index : request.indices()) { public boolean mustAck(DiscoveryNode discoveryNode) {
if (indicesService.hasIndex(index)) { return true;
continue;
}
final IndexMetaData indexMetaData = currentState.metaData().index(index);
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
indicesToClose.add(indexMetaData.getIndex());
// make sure to add custom default mapping if exists
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
}
// only add the current relevant mapping (if exists)
if (indexMetaData.getMappings().containsKey(request.type())) {
indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
}
} }
Map<String, DocumentMapper> newMappers = new HashMap<>(); @Override
Map<String, DocumentMapper> existingMappers = new HashMap<>(); public void onAllNodesAcked(@Nullable Throwable t) {
for (String index : request.indices()) { listener.onResponse(new ClusterStateUpdateResponse(true));
IndexService indexService = indicesService.indexServiceSafe(index);
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
DocumentMapper newMapper;
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
} else {
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
if (existingMapper != null) {
// first, simulate
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
// if we have conflicts, throw an exception
if (mergeResult.hasConflicts()) {
throw new MergeMappingException(mergeResult.buildConflicts());
}
} else {
// TODO: can we find a better place for this validation?
// The reason this validation is here is that the mapper service doesn't learn about
// new types all at once, which can create a false error.
// For example in MapperService we can't distinguish between a create index api call
// and a put mapping api call, so we don't know which type did exist before.
// Also the order of the mappings may be backwards.
if (newMapper.parentFieldMapper().active()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
}
}
}
}
}
newMappers.put(index, newMapper);
if (existingMapper != null) {
existingMappers.put(index, existingMapper);
}
} }
String mappingType = request.type(); @Override
if (mappingType == null) { public void onAckTimeout() {
mappingType = newMappers.values().iterator().next().type(); listener.onResponse(new ClusterStateUpdateResponse(false));
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
}
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
} }
final Map<String, MappingMetaData> mappings = new HashMap<>(); @Override
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) { public TimeValue ackTimeout() {
String index = entry.getKey(); return request.ackTimeout();
// do the actual merge here on the master, and update the mapping source
DocumentMapper newMapper = entry.getValue();
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
continue;
}
CompressedXContent existingSource = null;
if (existingMappers.containsKey(entry.getKey())) {
existingSource = existingMappers.get(entry.getKey()).mappingSource();
}
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {
if (existingSource.equals(updatedSource)) {
// same source, no changes, ignore it
} else {
// use the merged mapping source
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
}
}
} else {
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
}
}
} }
});
if (mappings.isEmpty()) {
// no changes, return
return currentState;
}
MetaData.Builder builder = MetaData.builder(currentState.metaData());
for (String indexName : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
if (indexMetaData == null) {
throw new IndexNotFoundException(indexName);
}
MappingMetaData mappingMd = mappings.get(indexName);
if (mappingMd != null) {
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
}
}
return ClusterState.builder(currentState).metaData(builder).build();
} finally {
for (String index : indicesToClose) {
indicesService.removeIndex(index, "created for mapping processing");
}
}
}
});
} }
} }
@ -24,11 +24,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest;
import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.RoutingTable;
@ -44,13 +40,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.settings.IndexDynamicSettings; import org.elasticsearch.index.settings.IndexDynamicSettings;
import java.util.ArrayList; import java.util.*;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@ -219,7 +209,8 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
} }
final Settings openSettings = updatedSettingsBuilder.build(); final Settings openSettings = updatedSettingsBuilder.build();
clusterService.submitStateUpdateTask("update-settings", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) { clusterService.submitStateUpdateTask("update-settings",
new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@Override @Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
@ -334,7 +325,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) { public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
clusterService.submitStateUpdateTask("update-index-compatibility-versions", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) { clusterService.submitStateUpdateTask("update-index-compatibility-versions", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@Override @Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
@ -147,7 +147,7 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> i
return; return;
} }
logger.trace("rerouting {}", reason); logger.trace("rerouting {}", reason);
clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", Priority.HIGH, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", new ClusterStateUpdateTask(Priority.HIGH) {
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
rerouting.set(false); rerouting.set(false);
@ -314,12 +314,12 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
public RoutingTableDiff(RoutingTable before, RoutingTable after) { public RoutingTableDiff(RoutingTable before, RoutingTable after) {
version = after.version; version = after.version;
indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting); indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting, DiffableUtils.getStringKeySerializer());
} }
public RoutingTableDiff(StreamInput in) throws IOException { public RoutingTableDiff(StreamInput in) throws IOException {
version = in.readLong(); version = in.readLong();
indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, IndexRoutingTable.PROTO); indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexRoutingTable.PROTO);
} }
@Override @Override
@ -27,7 +27,14 @@ import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@ -39,6 +46,8 @@ import org.elasticsearch.common.settings.Settings;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function; import java.util.function.Function;
import java.util.stream.Collectors; import java.util.stream.Collectors;
@ -79,24 +88,83 @@ public class AllocationService extends AbstractComponent {
StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo()); StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo());
boolean changed = applyStartedShards(routingNodes, startedShards); boolean changed = applyStartedShards(routingNodes, startedShards);
if (!changed) { if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable()); return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
} }
shardsAllocators.applyStartedShards(allocation); shardsAllocators.applyStartedShards(allocation);
if (withReroute) { if (withReroute) {
reroute(allocation); reroute(allocation);
} }
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()); final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString()); String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
logClusterHealthStateChange( logClusterHealthStateChange(
new ClusterStateHealth(clusterState), new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.metaData(), routingTable), new ClusterStateHealth(clusterState.metaData(), result.routingTable()),
"shards started [" + startedShardsAsString + "] ..." "shards started [" + startedShardsAsString + "] ..."
); );
return result; return result;
} }
protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes) {
return buildChangedResult(metaData, routingNodes, new RoutingExplanations());
}
protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes, RoutingExplanations explanations) {
final RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build();
MetaData newMetaData = updateMetaDataWithRoutingTable(metaData, routingTable);
return new RoutingAllocation.Result(true, routingTable.validateRaiseException(newMetaData), newMetaData, explanations);
}
/**
* Updates the current {@link MetaData} based on the newly created {@link RoutingTable}.
*
* @param currentMetaData {@link MetaData} object from before the routing table was changed.
* @param newRoutingTable new {@link RoutingTable} created by the allocation change
* @return adapted {@link MetaData}, potentially the original one if no change was needed.
*/
static MetaData updateMetaDataWithRoutingTable(MetaData currentMetaData, RoutingTable newRoutingTable) {
// make sure index meta data and routing tables are in sync w.r.t active allocation ids
MetaData.Builder metaDataBuilder = null;
for (IndexRoutingTable indexRoutingTable : newRoutingTable) {
final IndexMetaData indexMetaData = currentMetaData.index(indexRoutingTable.getIndex());
if (indexMetaData == null) {
throw new IllegalStateException("no metadata found for index [" + indexRoutingTable.index() + "]");
}
IndexMetaData.Builder indexMetaDataBuilder = null;
for (IndexShardRoutingTable shardRoutings : indexRoutingTable) {
Set<String> activeAllocationIds = shardRoutings.activeShards().stream()
.map(ShardRouting::allocationId)
.filter(Objects::nonNull)
.map(AllocationId::getId)
.collect(Collectors.toSet());
// only update active allocation ids if there is an active shard
if (activeAllocationIds.isEmpty() == false) {
// get currently stored allocation ids
Set<String> storedAllocationIds = indexMetaData.activeAllocationIds(shardRoutings.shardId().id());
if (activeAllocationIds.equals(storedAllocationIds) == false) {
if (indexMetaDataBuilder == null) {
indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
}
indexMetaDataBuilder.putActiveAllocationIds(shardRoutings.shardId().id(), activeAllocationIds);
}
}
}
if (indexMetaDataBuilder != null) {
if (metaDataBuilder == null) {
metaDataBuilder = MetaData.builder(currentMetaData);
}
metaDataBuilder.put(indexMetaDataBuilder);
}
}
if (metaDataBuilder != null) {
return metaDataBuilder.build();
} else {
return currentMetaData;
}
}
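`updateMetaDataWithRoutingTable` keeps the per-shard set of active allocation ids stored in the index metadata in sync with the shard copies that the new routing table actually reports as active. The id-collection step is an ordinary stream pipeline; here is a self-contained sketch with a stand-in `ShardCopy` type instead of the real `ShardRouting`:

---------------------------------------------------------------------------
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

// Stand-in ShardCopy type (not the real ShardRouting): collect the non-null allocation ids
// of the active copies of a shard into a set, as the hunk above does per IndexShardRoutingTable.
class ActiveIdsSketch {
    record ShardCopy(boolean active, String allocationId) {}

    static Set<String> activeAllocationIds(List<ShardCopy> copies) {
        return copies.stream()
                .filter(ShardCopy::active)
                .map(ShardCopy::allocationId)
                .filter(Objects::nonNull)
                .collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        List<ShardCopy> copies = List.of(
                new ShardCopy(true, "a1"), new ShardCopy(false, "b2"), new ShardCopy(true, null));
        System.out.println(activeAllocationIds(copies)); // [a1]
    }
}
---------------------------------------------------------------------------

Only when the collected set differs from what is already stored does the method rebuild the index metadata, so an unchanged routing table leaves the original MetaData instance untouched.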
public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) { public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null))); return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
} }
@ -117,16 +185,15 @@ public class AllocationService extends AbstractComponent {
System.nanoTime(), System.currentTimeMillis())); System.nanoTime(), System.currentTimeMillis()));
} }
if (!changed) { if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable()); return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
} }
shardsAllocators.applyFailedShards(allocation); shardsAllocators.applyFailedShards(allocation);
reroute(allocation); reroute(allocation);
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()); final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString()); String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
logClusterHealthStateChange( logClusterHealthStateChange(
new ClusterStateHealth(clusterState), new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.getMetaData(), routingTable), new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
"shards failed [" + failedShardsAsString + "] ..." "shards failed [" + failedShardsAsString + "] ..."
); );
return result; return result;
@ -169,11 +236,10 @@ public class AllocationService extends AbstractComponent {
// the assumption is that commands will move / act on shards (or fail through exceptions) // the assumption is that commands will move / act on shards (or fail through exceptions)
// so, there will always be shard "movements", so no need to check on reroute // so, there will always be shard "movements", so no need to check on reroute
reroute(allocation); reroute(allocation);
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()); RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes, explanations);
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable, explanations);
logClusterHealthStateChange( logClusterHealthStateChange(
new ClusterStateHealth(clusterState), new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.getMetaData(), routingTable), new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
"reroute commands" "reroute commands"
); );
return result; return result;
@ -200,13 +266,12 @@ public class AllocationService extends AbstractComponent {
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime()); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
allocation.debugDecision(debug); allocation.debugDecision(debug);
if (!reroute(allocation)) { if (!reroute(allocation)) {
return new RoutingAllocation.Result(false, clusterState.routingTable()); return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
} }
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData()); RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
logClusterHealthStateChange( logClusterHealthStateChange(
new ClusterStateHealth(clusterState), new ClusterStateHealth(clusterState),
new ClusterStateHealth(clusterState.getMetaData(), routingTable), new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
reason reason
); );
return result; return result;
@ -52,29 +52,33 @@ public class RoutingAllocation {
private final RoutingTable routingTable; private final RoutingTable routingTable;
private final MetaData metaData;
private RoutingExplanations explanations = new RoutingExplanations(); private RoutingExplanations explanations = new RoutingExplanations();
/** /**
* Creates a new {@link RoutingAllocation.Result} * Creates a new {@link RoutingAllocation.Result}
*
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed * @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
* @param routingTable the {@link RoutingTable} this Result references * @param routingTable the {@link RoutingTable} this Result references
* @param metaData the {@link MetaData} this Result references
*/ */
public Result(boolean changed, RoutingTable routingTable) { public Result(boolean changed, RoutingTable routingTable, MetaData metaData) {
this.changed = changed; this.changed = changed;
this.routingTable = routingTable; this.routingTable = routingTable;
this.metaData = metaData;
} }
/** /**
* Creates a new {@link RoutingAllocation.Result} * Creates a new {@link RoutingAllocation.Result}
*
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed * @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
* @param routingTable the {@link RoutingTable} this Result references * @param routingTable the {@link RoutingTable} this Result references
* @param metaData the {@link MetaData} this Result references
* @param explanations Explanation for the reroute actions * @param explanations Explanation for the reroute actions
*/ */
public Result(boolean changed, RoutingTable routingTable, RoutingExplanations explanations) { public Result(boolean changed, RoutingTable routingTable, MetaData metaData, RoutingExplanations explanations) {
this.changed = changed; this.changed = changed;
this.routingTable = routingTable; this.routingTable = routingTable;
this.metaData = metaData;
this.explanations = explanations; this.explanations = explanations;
} }
@ -85,6 +89,14 @@ public class RoutingAllocation {
return this.changed; return this.changed;
} }
/**
* Get the {@link MetaData} referenced by this result
* @return referenced {@link MetaData}
*/
public MetaData metaData() {
return metaData;
}
/** /**
* Get the {@link RoutingTable} referenced by this result * Get the {@link RoutingTable} referenced by this result
* @return referenced {@link RoutingTable} * @return referenced {@link RoutingTable}
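The hunk above adds the cluster MetaData to RoutingAllocation.Result, so a caller gets the routing table together with the metadata it was computed against instead of reaching back into the previous cluster state. A minimal standalone sketch of that pairing, using hypothetical type names rather than the Elasticsearch classes:

---------------------------------------------------------------------------
// Sketch only: an immutable "did anything change?" result that carries both the
// routing table and the metadata it was derived from. Type names are hypothetical.
final class AllocationResultSketch<TABLE, META> {
    private final boolean changed;
    private final TABLE routingTable;
    private final META metaData;

    AllocationResultSketch(boolean changed, TABLE routingTable, META metaData) {
        this.changed = changed;
        this.routingTable = routingTable;
        this.metaData = metaData;
    }

    boolean changed() { return changed; }
    TABLE routingTable() { return routingTable; }
    META metaData() { return metaData; }
}
---------------------------------------------------------------------------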
@ -20,16 +20,8 @@
package org.elasticsearch.cluster.service; package org.elasticsearch.cluster.service;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterState.Builder; import org.elasticsearch.cluster.ClusterState.Builder;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.TimeoutClusterStateListener;
import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
@ -41,6 +33,7 @@ import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority; import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLogger;
@ -49,13 +42,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.*;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryService; import org.elasticsearch.discovery.DiscoveryService;
@ -63,18 +50,10 @@ import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.ArrayList; import java.util.*;
import java.util.Collection; import java.util.concurrent.*;
import java.util.Iterator; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.List; import java.util.stream.Collectors;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
@ -111,6 +90,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
private final Collection<ClusterStateListener> priorityClusterStateListeners = new CopyOnWriteArrayList<>(); private final Collection<ClusterStateListener> priorityClusterStateListeners = new CopyOnWriteArrayList<>();
private final Collection<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<>(); private final Collection<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<>();
private final Collection<ClusterStateListener> lastClusterStateListeners = new CopyOnWriteArrayList<>(); private final Collection<ClusterStateListener> lastClusterStateListeners = new CopyOnWriteArrayList<>();
private final Map<ClusterStateTaskExecutor, List<UpdateTask>> updateTasksPerExecutor = new HashMap<>();
// TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API // TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API
private final Collection<ClusterStateListener> postAppliedListeners = new CopyOnWriteArrayList<>(); private final Collection<ClusterStateListener> postAppliedListeners = new CopyOnWriteArrayList<>();
private final Iterable<ClusterStateListener> preAppliedListeners = Iterables.concat(priorityClusterStateListeners, clusterStateListeners, lastClusterStateListeners); private final Iterable<ClusterStateListener> preAppliedListeners = Iterables.concat(priorityClusterStateListeners, clusterStateListeners, lastClusterStateListeners);
@ -289,30 +269,33 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
@Override @Override
public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) { public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) {
submitStateUpdateTask(source, Priority.NORMAL, updateTask); submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask);
} }
@Override @Override
public void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask) { public <T> void submitStateUpdateTask(final String source, final T task,
final ClusterStateTaskConfig config,
final ClusterStateTaskExecutor<T> executor,
final ClusterStateTaskListener listener
) {
if (!lifecycle.started()) { if (!lifecycle.started()) {
return; return;
} }
try { try {
final UpdateTask task = new UpdateTask(source, priority, updateTask); final UpdateTask<T> updateTask = new UpdateTask<>(source, task, config, executor, listener);
if (updateTask.timeout() != null) {
updateTasksExecutor.execute(task, threadPool.scheduler(), updateTask.timeout(), new Runnable() { synchronized (updateTasksPerExecutor) {
@Override updateTasksPerExecutor.computeIfAbsent(executor, k -> new ArrayList<>()).add(updateTask);
public void run() { }
threadPool.generic().execute(new Runnable() {
@Override if (config.timeout() != null) {
public void run() { updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> {
updateTask.onFailure(task.source(), new ProcessClusterEventTimeoutException(updateTask.timeout(), task.source())); if (updateTask.processed.getAndSet(true) == false) {
} listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source));
}); }}));
}
});
} else { } else {
updateTasksExecutor.execute(task); updateTasksExecutor.execute(updateTask);
} }
} catch (EsRejectedExecutionException e) { } catch (EsRejectedExecutionException e) {
// ignore cases where we are shutting down..., there is really nothing interesting // ignore cases where we are shutting down..., there is really nothing interesting
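The new submission path above wraps each task in an UpdateTask, files it under its executor in updateTasksPerExecutor, and only then queues a runnable; a later batch run drains everything queued for that executor in one pass. A self-contained sketch of that enqueue/drain pattern in plain Java (not the Elasticsearch types):

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch: tasks submitted under the same executor key are grouped so that one
// queued runnable can later drain and apply the whole batch at once.
public class BatchingQueue<E, T> {

    private final Map<E, List<T>> pendingPerExecutor = new HashMap<>();

    // Called on submit: file the task under its executor before queueing work.
    public void submit(E executorKey, T task) {
        synchronized (pendingPerExecutor) {
            pendingPerExecutor.computeIfAbsent(executorKey, k -> new ArrayList<>()).add(task);
        }
    }

    // Called by the queued runnable: atomically take everything pending for this executor.
    public List<T> drain(E executorKey) {
        synchronized (pendingPerExecutor) {
            List<T> pending = pendingPerExecutor.remove(executorKey);
            return pending == null ? new ArrayList<>() : pending;
        }
    }

    public static void main(String[] args) {
        BatchingQueue<String, String> queue = new BatchingQueue<>();
        queue.submit("index-creation", "create [a]");
        queue.submit("index-creation", "create [b]");
        System.out.println(queue.drain("index-creation")); // [create [a], create [b]]
        System.out.println(queue.drain("index-creation")); // []
    }
}
---------------------------------------------------------------------------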
@ -379,188 +362,238 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
} }
class UpdateTask extends SourcePrioritizedRunnable { <T> void runTasksForExecutor(ClusterStateTaskExecutor<T> executor) {
final ArrayList<UpdateTask<T>> toExecute = new ArrayList<>();
public final ClusterStateUpdateTask updateTask; final ArrayList<String> sources = new ArrayList<>();
synchronized (updateTasksPerExecutor) {
UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) { List<UpdateTask> pending = updateTasksPerExecutor.remove(executor);
super(priority, source); if (pending != null) {
this.updateTask = updateTask; for (UpdateTask<T> task : pending) {
if (task.processed.getAndSet(true) == false) {
logger.trace("will process [{}]", task.source);
toExecute.add(task);
sources.add(task.source);
} else {
logger.trace("skipping [{}], already processed", task.source);
}
}
}
}
if (toExecute.isEmpty()) {
return;
}
final String source = Strings.collectionToCommaDelimitedString(sources);
if (!lifecycle.started()) {
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
return;
}
logger.debug("processing [{}]: execute", source);
ClusterState previousClusterState = clusterState;
if (!previousClusterState.nodes().localNodeMaster() && executor.runOnlyOnMaster()) {
logger.debug("failing [{}]: local node is no longer master", source);
toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source));
return;
}
ClusterStateTaskExecutor.BatchResult<T> batchResult;
long startTimeNS = System.nanoTime();
try {
List<T> inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList());
batchResult = executor.execute(previousClusterState, inputs);
} catch (Throwable e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(previousClusterState.nodes().prettyPrint());
sb.append(previousClusterState.routingTable().prettyPrint());
sb.append(previousClusterState.getRoutingNodes().prettyPrint());
logger.trace(sb.toString(), e);
}
warnAboutSlowTaskIfNeeded(executionTime, source);
batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState);
} }
@Override assert batchResult.executionResults != null;
public void run() {
if (!lifecycle.started()) {
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
return;
}
logger.debug("processing [{}]: execute", source);
ClusterState previousClusterState = clusterState;
if (!previousClusterState.nodes().localNodeMaster() && updateTask.runOnlyOnMaster()) {
logger.debug("failing [{}]: local node is no longer master", source);
updateTask.onNoLongerMaster(source);
return;
}
ClusterState newClusterState;
long startTimeNS = System.nanoTime();
try {
newClusterState = updateTask.execute(previousClusterState);
} catch (Throwable e) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
sb.append(previousClusterState.nodes().prettyPrint());
sb.append(previousClusterState.routingTable().prettyPrint());
sb.append(previousClusterState.getRoutingNodes().prettyPrint());
logger.trace(sb.toString(), e);
}
warnAboutSlowTaskIfNeeded(executionTime, source);
updateTask.onFailure(source, e);
return;
}
if (previousClusterState == newClusterState) { ClusterState newClusterState = batchResult.resultingState;
if (updateTask instanceof AckedClusterStateUpdateTask) { final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>();
// fail all tasks that have failed and extract those that are waiting for results
for (UpdateTask<T> updateTask : toExecute) {
assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString();
final ClusterStateTaskExecutor.TaskResult executionResult =
batchResult.executionResults.get(updateTask.task);
executionResult.handle(() -> proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex));
}
if (previousClusterState == newClusterState) {
for (UpdateTask<T> task : proccessedListeners) {
if (task.listener instanceof AckedClusterStateTaskListener) {
//no need to wait for ack if nothing changed, the update can be counted as acknowledged //no need to wait for ack if nothing changed, the update can be counted as acknowledged
((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null); ((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null);
} }
updateTask.clusterStateProcessed(source, previousClusterState, newClusterState); task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
warnAboutSlowTaskIfNeeded(executionTime, source);
return;
} }
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
warnAboutSlowTaskIfNeeded(executionTime, source);
return;
}
try { try {
Discovery.AckListener ackListener = new NoOpAckListener(); ArrayList<Discovery.AckListener> ackListeners = new ArrayList<>();
if (newClusterState.nodes().localNodeMaster()) { if (newClusterState.nodes().localNodeMaster()) {
// only the master controls the version numbers // only the master controls the version numbers
Builder builder = ClusterState.builder(newClusterState).incrementVersion(); Builder builder = ClusterState.builder(newClusterState).incrementVersion();
if (previousClusterState.routingTable() != newClusterState.routingTable()) { if (previousClusterState.routingTable() != newClusterState.routingTable()) {
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build()); builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build());
} }
if (previousClusterState.metaData() != newClusterState.metaData()) { if (previousClusterState.metaData() != newClusterState.metaData()) {
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1)); builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
} }
newClusterState = builder.build(); newClusterState = builder.build();
for (UpdateTask<T> task : proccessedListeners) {
if (updateTask instanceof AckedClusterStateUpdateTask) { if (task.listener instanceof AckedClusterStateTaskListener) {
final AckedClusterStateUpdateTask ackedUpdateTask = (AckedClusterStateUpdateTask) updateTask; final AckedClusterStateTaskListener ackedListener = (AckedClusterStateTaskListener) task.listener;
if (ackedUpdateTask.ackTimeout() == null || ackedUpdateTask.ackTimeout().millis() == 0) { if (ackedListener.ackTimeout() == null || ackedListener.ackTimeout().millis() == 0) {
ackedUpdateTask.onAckTimeout(); ackedListener.onAckTimeout();
} else { } else {
try { try {
ackListener = new AckCountDownListener(ackedUpdateTask, newClusterState.version(), newClusterState.nodes(), threadPool); ackListeners.add(new AckCountDownListener(ackedListener, newClusterState.version(), newClusterState.nodes(), threadPool));
} catch (EsRejectedExecutionException ex) { } catch (EsRejectedExecutionException ex) {
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex); logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex);
} }
//timeout straightaway, otherwise we could wait forever as the timeout thread has not started //timeout straightaway, otherwise we could wait forever as the timeout thread has not started
ackedUpdateTask.onAckTimeout(); ackedListener.onAckTimeout();
} }
} }
} }
} }
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
sb.append(newClusterState.prettyPrint());
logger.trace(sb.toString());
} else if (logger.isDebugEnabled()) {
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
}
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
// new cluster state, notify all listeners
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
String summary = nodesDelta.shortSummary();
if (summary.length() > 0) {
logger.info("{}, reason: {}", summary, source);
}
}
// TODO, do this in parallel (and wait)
for (DiscoveryNode node : nodesDelta.addedNodes()) {
if (!nodeRequiresConnection(node)) {
continue;
}
try {
transportService.connectToNode(node);
} catch (Throwable e) {
// the fault detection will detect it as failed as well
logger.warn("failed to connect to node [" + node + "]", e);
}
}
// if we are the master, publish the new state to all nodes
// we publish here before we send a notification to all the listeners, since if it fails
// we don't want to notify
if (newClusterState.nodes().localNodeMaster()) {
logger.debug("publishing cluster state version [{}]", newClusterState.version());
try {
discoveryService.publish(clusterChangedEvent, ackListener);
} catch (Discovery.FailedToCommitClusterStateException t) {
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version());
updateTask.onFailure(source, t);
return;
}
}
// update the current cluster state
clusterState = newClusterState;
logger.debug("set local cluster state to version {}", newClusterState.version());
for (ClusterStateListener listener : preAppliedListeners) {
try {
listener.clusterChanged(clusterChangedEvent);
} catch (Exception ex) {
logger.warn("failed to notify ClusterStateListener", ex);
}
}
for (DiscoveryNode node : nodesDelta.removedNodes()) {
try {
transportService.disconnectFromNode(node);
} catch (Throwable e) {
logger.warn("failed to disconnect to node [" + node + "]", e);
}
}
newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);
for (ClusterStateListener listener : postAppliedListeners) {
try {
listener.clusterChanged(clusterChangedEvent);
} catch (Exception ex) {
logger.warn("failed to notify ClusterStateListener", ex);
}
}
//manual ack only from the master at the end of the publish
if (newClusterState.nodes().localNodeMaster()) {
try {
ackListener.onNodeAck(newClusterState.nodes().localNode(), null);
} catch (Throwable t) {
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
}
}
updateTask.clusterStateProcessed(source, previousClusterState, newClusterState);
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
warnAboutSlowTaskIfNeeded(executionTime, source);
} catch (Throwable t) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint());
sb.append(newClusterState.getRoutingNodes().prettyPrint());
logger.warn(sb.toString(), t);
// TODO: do we want to call updateTask.onFailure here?
} }
final Discovery.AckListener ackListener = new DelegetingAckListener(ackListeners);
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);
if (logger.isTraceEnabled()) {
StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
sb.append(newClusterState.prettyPrint());
logger.trace(sb.toString());
} else if (logger.isDebugEnabled()) {
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
}
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
// new cluster state, notify all listeners
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
String summary = nodesDelta.shortSummary();
if (summary.length() > 0) {
logger.info("{}, reason: {}", summary, source);
}
}
// TODO, do this in parallel (and wait)
for (DiscoveryNode node : nodesDelta.addedNodes()) {
if (!nodeRequiresConnection(node)) {
continue;
}
try {
transportService.connectToNode(node);
} catch (Throwable e) {
// the fault detection will detect it as failed as well
logger.warn("failed to connect to node [" + node + "]", e);
}
}
// if we are the master, publish the new state to all nodes
// we publish here before we send a notification to all the listeners, since if it fails
// we don't want to notify
if (newClusterState.nodes().localNodeMaster()) {
logger.debug("publishing cluster state version [{}]", newClusterState.version());
try {
discoveryService.publish(clusterChangedEvent, ackListener);
} catch (Discovery.FailedToCommitClusterStateException t) {
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version());
proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
return;
}
}
// update the current cluster state
clusterState = newClusterState;
logger.debug("set local cluster state to version {}", newClusterState.version());
for (ClusterStateListener listener : preAppliedListeners) {
try {
listener.clusterChanged(clusterChangedEvent);
} catch (Exception ex) {
logger.warn("failed to notify ClusterStateListener", ex);
}
}
for (DiscoveryNode node : nodesDelta.removedNodes()) {
try {
transportService.disconnectFromNode(node);
} catch (Throwable e) {
logger.warn("failed to disconnect to node [" + node + "]", e);
}
}
newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);
for (ClusterStateListener listener : postAppliedListeners) {
try {
listener.clusterChanged(clusterChangedEvent);
} catch (Exception ex) {
logger.warn("failed to notify ClusterStateListener", ex);
}
}
//manual ack only from the master at the end of the publish
if (newClusterState.nodes().localNodeMaster()) {
try {
ackListener.onNodeAck(newClusterState.nodes().localNode(), null);
} catch (Throwable t) {
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
}
}
for (UpdateTask<T> task : proccessedListeners) {
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
}
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
warnAboutSlowTaskIfNeeded(executionTime, source);
} catch (Throwable t) {
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
sb.append(newClusterState.nodes().prettyPrint());
sb.append(newClusterState.routingTable().prettyPrint());
sb.append(newClusterState.getRoutingNodes().prettyPrint());
logger.warn(sb.toString(), t);
// TODO: do we want to call updateTask.onFailure here?
}
}
class UpdateTask<T> extends SourcePrioritizedRunnable {
public final T task;
public final ClusterStateTaskConfig config;
public final ClusterStateTaskExecutor<T> executor;
public final ClusterStateTaskListener listener;
public final AtomicBoolean processed = new AtomicBoolean();
UpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {
super(config.priority(), source);
this.task = task;
this.config = config;
this.executor = executor;
this.listener = listener;
}
@Override
public void run() {
runTasksForExecutor(executor);
} }
} }
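Both the timeout callback in submitStateUpdateTask and runTasksForExecutor above race to claim each task via processed.getAndSet(true), so a task is either failed with a timeout or executed in a batch, never both. A minimal standalone sketch of that claim-once idiom:

---------------------------------------------------------------------------
import java.util.concurrent.atomic.AtomicBoolean;

// Sketch: two code paths (a timeout and a batch runner) may both reach a task;
// getAndSet(true) guarantees exactly one of them acts on it.
public class ClaimOnce {

    private final AtomicBoolean processed = new AtomicBoolean();

    /** @return true for the first caller only */
    public boolean claim() {
        return processed.getAndSet(true) == false;
    }

    public static void main(String[] args) {
        ClaimOnce task = new ClaimOnce();
        if (task.claim()) {
            System.out.println("batch runner executes the task");
        }
        if (task.claim()) {
            System.out.println("never printed: the timeout path lost the race");
        }
    }
}
---------------------------------------------------------------------------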
@ -729,13 +762,24 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
} }
private static class NoOpAckListener implements Discovery.AckListener { private static class DelegetingAckListener implements Discovery.AckListener {
final private List<Discovery.AckListener> listeners;
private DelegetingAckListener(List<Discovery.AckListener> listeners) {
this.listeners = listeners;
}
@Override @Override
public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) { public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
for (Discovery.AckListener listener : listeners) {
listener.onNodeAck(node, t);
}
} }
@Override @Override
public void onTimeout() { public void onTimeout() {
throw new UnsupportedOperationException("no timeout delegation");
} }
} }
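The new DelegetingAckListener above fans each node acknowledgement out to the per-task ack listeners collected for the batch; timeouts are not delegated because each AckCountDownListener schedules its own. A compact standalone sketch of the fan-out idea (plain Java, hypothetical names):

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.function.BiConsumer;

// Sketch: a composite ack listener that forwards every (node, failure) pair to all delegates.
public class FanOutAck<N> {

    private final List<BiConsumer<N, Throwable>> delegates;

    public FanOutAck(List<BiConsumer<N, Throwable>> delegates) {
        this.delegates = delegates;
    }

    public void onNodeAck(N node, Throwable failure) {
        for (BiConsumer<N, Throwable> delegate : delegates) {
            delegate.accept(node, failure);
        }
    }

    public static void main(String[] args) {
        FanOutAck<String> ack = new FanOutAck<>(Arrays.asList(
                (node, t) -> System.out.println("task-1 saw ack from " + node),
                (node, t) -> System.out.println("task-2 saw ack from " + node)));
        ack.onNodeAck("node-42", null);
    }
}
---------------------------------------------------------------------------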
@ -743,20 +787,20 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class); private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class);
private final AckedClusterStateUpdateTask ackedUpdateTask; private final AckedClusterStateTaskListener ackedTaskListener;
private final CountDown countDown; private final CountDown countDown;
private final DiscoveryNodes nodes; private final DiscoveryNodes nodes;
private final long clusterStateVersion; private final long clusterStateVersion;
private final Future<?> ackTimeoutCallback; private final Future<?> ackTimeoutCallback;
private Throwable lastFailure; private Throwable lastFailure;
AckCountDownListener(AckedClusterStateUpdateTask ackedUpdateTask, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) { AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
this.ackedUpdateTask = ackedUpdateTask; this.ackedTaskListener = ackedTaskListener;
this.clusterStateVersion = clusterStateVersion; this.clusterStateVersion = clusterStateVersion;
this.nodes = nodes; this.nodes = nodes;
int countDown = 0; int countDown = 0;
for (DiscoveryNode node : nodes) { for (DiscoveryNode node : nodes) {
if (ackedUpdateTask.mustAck(node)) { if (ackedTaskListener.mustAck(node)) {
countDown++; countDown++;
} }
} }
@ -764,7 +808,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
countDown = Math.max(1, countDown); countDown = Math.max(1, countDown);
logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion); logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
this.countDown = new CountDown(countDown); this.countDown = new CountDown(countDown);
this.ackTimeoutCallback = threadPool.schedule(ackedUpdateTask.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() { this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
@Override @Override
public void run() { public void run() {
onTimeout(); onTimeout();
@ -774,7 +818,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
@Override @Override
public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) { public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
if (!ackedUpdateTask.mustAck(node)) { if (!ackedTaskListener.mustAck(node)) {
//we always wait for the master ack anyway //we always wait for the master ack anyway
if (!node.equals(nodes.masterNode())) { if (!node.equals(nodes.masterNode())) {
return; return;
@ -790,7 +834,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
if (countDown.countDown()) { if (countDown.countDown()) {
logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion); logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion);
FutureUtils.cancel(ackTimeoutCallback); FutureUtils.cancel(ackTimeoutCallback);
ackedUpdateTask.onAllNodesAcked(lastFailure); ackedTaskListener.onAllNodesAcked(lastFailure);
} }
} }
@ -798,7 +842,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
public void onTimeout() { public void onTimeout() {
if (countDown.fastForward()) { if (countDown.fastForward()) {
logger.trace("timeout waiting for acknowledgement for cluster_state update (version: {})", clusterStateVersion); logger.trace("timeout waiting for acknowledgement for cluster_state update (version: {})", clusterStateVersion);
ackedUpdateTask.onAckTimeout(); ackedTaskListener.onAckTimeout();
} }
} }
} }
@ -810,5 +854,4 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
InternalClusterService.this.slowTaskLoggingThreshold = slowTaskLoggingThreshold; InternalClusterService.this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
} }
} }
} }
@ -65,8 +65,8 @@ public class MacAddressProvider {
byte[] address = null; byte[] address = null;
try { try {
address = getMacAddress(); address = getMacAddress();
} catch( SocketException se ) { } catch (Throwable t) {
logger.warn("Unable to get mac address, will use a dummy address", se); logger.warn("Unable to get mac address, will use a dummy address", t);
// address will be set below // address will be set below
} }
@ -116,7 +116,7 @@ public abstract class Terminal {
} }
public void printError(Throwable t) { public void printError(Throwable t) {
printError("%s", t.getMessage()); printError("%s", t.toString());
if (isDebugEnabled) { if (isDebugEnabled) {
printStackTrace(t); printStackTrace(t);
} }
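The one-line change above switches printError from t.getMessage() to t.toString(); the difference matters because getMessage() can be null (for example a bare NullPointerException), while toString() always includes at least the exception class name. A quick standalone illustration:

---------------------------------------------------------------------------
// Shows why toString() is the safer choice for a one-line error message.
public class PrintErrorDemo {
    public static void main(String[] args) {
        Throwable t = new NullPointerException(); // thrown without a message
        System.out.println(t.getMessage());       // prints: null
        System.out.println(t.toString());         // prints: java.lang.NullPointerException
    }
}
---------------------------------------------------------------------------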
@ -23,7 +23,6 @@ import org.apache.lucene.store.IndexInput;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.deflate.DeflateCompressor; import org.elasticsearch.common.compress.deflate.DeflateCompressor;
import org.elasticsearch.common.compress.lzf.LZFCompressor;
import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
@ -42,7 +41,6 @@ public class CompressorFactory {
static { static {
compressors = new Compressor[] { compressors = new Compressor[] {
new LZFCompressor(),
new DeflateCompressor() new DeflateCompressor()
}; };
defaultCompressor = new DeflateCompressor(); defaultCompressor = new DeflateCompressor();
@ -82,12 +80,23 @@ public class CompressorFactory {
XContentType contentType = XContentFactory.xContentType(bytes); XContentType contentType = XContentFactory.xContentType(bytes);
if (contentType == null) { if (contentType == null) {
if (isAncient(bytes)) {
throw new IllegalStateException("unsupported compression: index was created before v2.0.0.beta1 and wasn't upgraded?");
}
throw new NotXContentException("Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes"); throw new NotXContentException("Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes");
} }
return null; return null;
} }
/** true if the bytes were compressed with LZF: only used before elasticsearch 2.0 */
private static boolean isAncient(BytesReference bytes) {
return bytes.length() >= 3 &&
bytes.get(0) == 'Z' &&
bytes.get(1) == 'V' &&
(bytes.get(2) == 0 || bytes.get(2) == 1);
}
public static Compressor compressor(ChannelBuffer buffer) { public static Compressor compressor(ChannelBuffer buffer) {
for (Compressor compressor : compressors) { for (Compressor compressor : compressors) {
if (compressor.isCompressed(buffer)) { if (compressor.isCompressed(buffer)) {
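With the LZFCompressor removed from the compressors array, the new isAncient check above still recognizes the legacy LZF chunk header ('Z', 'V', then a block-type byte of 0 or 1), purely so that pre-2.0.0.beta1 data fails with a clear message instead of a generic NotXContentException. A standalone sketch of the same magic-byte test on a plain byte array:

---------------------------------------------------------------------------
// Sketch: detect the legacy LZF chunk header ('Z', 'V', block type 0 or 1)
// so old compressed blobs can be rejected with a meaningful error.
public class LzfHeaderCheck {

    static boolean looksLikeLzf(byte[] bytes) {
        return bytes.length >= 3
                && bytes[0] == 'Z'
                && bytes[1] == 'V'
                && (bytes[2] == 0 || bytes[2] == 1);
    }

    public static void main(String[] args) {
        byte[] legacy = {'Z', 'V', 1, 42, 42};
        byte[] json = {'{', '"', 'a'};
        System.out.println(looksLikeLzf(legacy)); // true
        System.out.println(looksLikeLzf(json));   // false
    }
}
---------------------------------------------------------------------------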
@ -1,80 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.compress.lzf;
import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.LZFChunk;
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.store.IndexInput;
import org.elasticsearch.common.compress.CompressedIndexInput;
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
import java.io.IOException;
import java.util.Arrays;
/**
*/
@Deprecated
public class LZFCompressedIndexInput extends CompressedIndexInput {
private final ChunkDecoder decoder;
// scratch area buffer
private byte[] inputBuffer;
public LZFCompressedIndexInput(IndexInput in, ChunkDecoder decoder) throws IOException {
super(in);
this.decoder = decoder;
this.uncompressed = new byte[LZFChunk.MAX_CHUNK_LEN];
this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
this.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
}
@Override
protected void readHeader(IndexInput in) throws IOException {
byte[] header = new byte[LZFCompressor.LUCENE_HEADER.length];
in.readBytes(header, 0, header.length, false);
if (!Arrays.equals(header, LZFCompressor.LUCENE_HEADER)) {
throw new IOException("wrong lzf compressed header [" + Arrays.toString(header) + "]");
}
}
@Override
protected int uncompress(IndexInput in, byte[] out) throws IOException {
return decoder.decodeChunk(new InputStreamIndexInput(in, Long.MAX_VALUE), inputBuffer, out);
}
@Override
protected void doClose() throws IOException {
// nothing to do here...
}
@Override
public IndexInput clone() {
LZFCompressedIndexInput cloned = (LZFCompressedIndexInput) super.clone();
cloned.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
return cloned;
}
@Override
public IndexInput slice(String description, long offset, long length) throws IOException {
return BufferedIndexInput.wrap(description, this, offset, length);
}
}
@ -1,73 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.compress.lzf;
import com.ning.compress.BufferRecycler;
import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.LZFChunk;
import org.elasticsearch.common.compress.CompressedStreamInput;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
/**
*/
public class LZFCompressedStreamInput extends CompressedStreamInput {
private final BufferRecycler recycler;
private final ChunkDecoder decoder;
// scratch area buffer
private byte[] inputBuffer;
public LZFCompressedStreamInput(StreamInput in, ChunkDecoder decoder) throws IOException {
super(in);
this.recycler = BufferRecycler.instance();
this.decoder = decoder;
this.uncompressed = recycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
this.inputBuffer = recycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
}
@Override
public void readHeader(StreamInput in) throws IOException {
// nothing to do here, each chunk has a header
}
@Override
public int uncompress(StreamInput in, byte[] out) throws IOException {
return decoder.decodeChunk(in, inputBuffer, out);
}
@Override
protected void doClose() throws IOException {
byte[] buf = inputBuffer;
if (buf != null) {
inputBuffer = null;
recycler.releaseInputBuffer(buf);
}
buf = uncompressed;
if (buf != null) {
uncompressed = null;
recycler.releaseDecodeBuffer(uncompressed);
}
}
}
@ -1,100 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.compress.lzf;
import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
import org.apache.lucene.store.IndexInput;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedIndexInput;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.deflate.DeflateCompressor;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.Loggers;
import org.jboss.netty.buffer.ChannelBuffer;
import java.io.IOException;
/**
* @deprecated Use {@link DeflateCompressor} instead
*/
@Deprecated
public class LZFCompressor implements Compressor {
static final byte[] LUCENE_HEADER = {'L', 'Z', 'F', 0};
private ChunkDecoder decoder;
public LZFCompressor() {
this.decoder = ChunkDecoderFactory.safeInstance();
Loggers.getLogger(LZFCompressor.class).debug("using decoder[{}] ", this.decoder.getClass().getSimpleName());
}
@Override
public boolean isCompressed(BytesReference bytes) {
return bytes.length() >= 3 &&
bytes.get(0) == LZFChunk.BYTE_Z &&
bytes.get(1) == LZFChunk.BYTE_V &&
(bytes.get(2) == LZFChunk.BLOCK_TYPE_COMPRESSED || bytes.get(2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
}
@Override
public boolean isCompressed(ChannelBuffer buffer) {
int offset = buffer.readerIndex();
return buffer.readableBytes() >= 3 &&
buffer.getByte(offset) == LZFChunk.BYTE_Z &&
buffer.getByte(offset + 1) == LZFChunk.BYTE_V &&
(buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_COMPRESSED || buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
}
@Override
public boolean isCompressed(IndexInput in) throws IOException {
long currentPointer = in.getFilePointer();
// since we have some metdata before the first compressed header, we check on our specific header
if (in.length() - currentPointer < (LUCENE_HEADER.length)) {
return false;
}
for (int i = 0; i < LUCENE_HEADER.length; i++) {
if (in.readByte() != LUCENE_HEADER[i]) {
in.seek(currentPointer);
return false;
}
}
in.seek(currentPointer);
return true;
}
@Override
public StreamInput streamInput(StreamInput in) throws IOException {
return new LZFCompressedStreamInput(in, decoder);
}
@Override
public StreamOutput streamOutput(StreamOutput out) throws IOException {
throw new UnsupportedOperationException("LZF is only here for back compat, no write support");
}
@Override
public CompressedIndexInput indexInput(IndexInput in) throws IOException {
return new LZFCompressedIndexInput(in, decoder);
}
}
@ -34,8 +34,6 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> {
public static final GeoShapeType TYPE = GeoShapeType.LINESTRING; public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
protected boolean translated = false;
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); builder.startObject();
@ -89,6 +89,4 @@ public class MultiPolygonBuilder extends ShapeBuilder {
return new XShapeCollection<>(shapes, SPATIAL_CONTEXT); return new XShapeCollection<>(shapes, SPATIAL_CONTEXT);
//note: ShapeCollection is probably faster than a Multi* geom. //note: ShapeCollection is probably faster than a Multi* geom.
} }
} }
@ -38,6 +38,7 @@ import java.util.Arrays;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.concurrent.atomic.AtomicBoolean;
/** /**
* The {@link PolygonBuilder} implements the groundwork to create polygons. This contains * The {@link PolygonBuilder} implements the groundwork to create polygons. This contains
@ -141,9 +142,10 @@ public class PolygonBuilder extends ShapeBuilder {
Edge[] edges = new Edge[numEdges]; Edge[] edges = new Edge[numEdges];
Edge[] holeComponents = new Edge[holes.size()]; Edge[] holeComponents = new Edge[holes.size()];
int offset = createEdges(0, orientation, shell, null, edges, 0); final AtomicBoolean translated = new AtomicBoolean(false);
int offset = createEdges(0, orientation, shell, null, edges, 0, translated);
for (int i = 0; i < holes.size(); i++) { for (int i = 0; i < holes.size(); i++) {
int length = createEdges(i+1, orientation, shell, this.holes.get(i), edges, offset); int length = createEdges(i+1, orientation, shell, this.holes.get(i), edges, offset, translated);
holeComponents[i] = edges[offset]; holeComponents[i] = edges[offset];
offset += length; offset += length;
} }
@ -508,14 +510,157 @@ public class PolygonBuilder extends ShapeBuilder {
} }
private static int createEdges(int component, Orientation orientation, LineStringBuilder shell, private static int createEdges(int component, Orientation orientation, LineStringBuilder shell,
LineStringBuilder hole, LineStringBuilder hole, Edge[] edges, int offset, final AtomicBoolean translated) {
Edge[] edges, int offset) {
// inner rings (holes) have an opposite direction than the outer rings // inner rings (holes) have an opposite direction than the outer rings
// XOR will invert the orientation for outer ring cases (Truth Table:, T/T = F, T/F = T, F/T = T, F/F = F) // XOR will invert the orientation for outer ring cases (Truth Table:, T/T = F, T/F = T, F/T = T, F/F = F)
boolean direction = (component == 0 ^ orientation == Orientation.RIGHT); boolean direction = (component == 0 ^ orientation == Orientation.RIGHT);
// set the points array accordingly (shell or hole) // set the points array accordingly (shell or hole)
Coordinate[] points = (hole != null) ? hole.coordinates(false) : shell.coordinates(false); Coordinate[] points = (hole != null) ? hole.coordinates(false) : shell.coordinates(false);
Edge.ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1); ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1, translated);
return points.length-1; return points.length-1;
} }
/**
* Create a connected list of a list of coordinates
*
* @param points
* array of point
* @param offset
* index of the first point
* @param length
* number of points
* @return Array of edges
*/
private static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
Coordinate[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) {
// calculate the direction of the points:
// find the point a the top of the set and check its
// neighbors orientation. So direction is equivalent
// to clockwise/counterclockwise
final int top = top(points, offset, length);
final int prev = (offset + ((top + length - 1) % length));
final int next = (offset + ((top + 1) % length));
boolean orientation = points[offset + prev].x > points[offset + next].x;
// OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
// since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
// thus if orientation is computed as cw, the logic will translate points across dateline
// and convert to a right handed system
// compute the bounding box and calculate range
double[] range = range(points, offset, length);
final double rng = range[1] - range[0];
// translate the points if the following is true
// 1. shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
// (translation would result in a collapsed poly)
// 2. the shell of the candidate hole has been translated (to preserve the coordinate system)
boolean incorrectOrientation = component == 0 && handedness != orientation;
if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (translated.get() && component != 0)) {
translate(points);
// flip the translation bit if the shell is being translated
if (component == 0) {
translated.set(true);
}
// correct the orientation post translation (ccw for shell, cw for holes)
if (component == 0 || (component != 0 && handedness == orientation)) {
orientation = !orientation;
}
}
return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
}
private static final int top(Coordinate[] points, int offset, int length) {
int top = 0; // we start at 1 here since top points to 0
for (int i = 1; i < length; i++) {
if (points[offset + i].y < points[offset + top].y) {
top = i;
} else if (points[offset + i].y == points[offset + top].y) {
if (points[offset + i].x < points[offset + top].x) {
top = i;
}
}
}
return top;
}
private static final double[] range(Coordinate[] points, int offset, int length) {
double minX = points[0].x;
double maxX = points[0].x;
double minY = points[0].y;
double maxY = points[0].y;
// compute the bounding coordinates (@todo: cleanup brute force)
for (int i = 1; i < length; ++i) {
if (points[offset + i].x < minX) {
minX = points[offset + i].x;
}
if (points[offset + i].x > maxX) {
maxX = points[offset + i].x;
}
if (points[offset + i].y < minY) {
minY = points[offset + i].y;
}
if (points[offset + i].y > maxY) {
maxY = points[offset + i].y;
}
}
return new double[] {minX, maxX, minY, maxY};
}
/**
* Concatenate a set of points to a polygon
*
* @param component
* component id of the polygon
* @param direction
* direction of the ring
* @param points
* list of points to concatenate
* @param pointOffset
* index of the first point
* @param edges
* Array of edges to write the result to
* @param edgeOffset
* index of the first edge in the result
* @param length
* number of points to use
* @return the edges creates
*/
private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
int length) {
assert edges.length >= length+edgeOffset;
assert points.length >= length+pointOffset;
edges[edgeOffset] = new Edge(points[pointOffset], null);
for (int i = 1; i < length; i++) {
if (direction) {
edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
edges[edgeOffset + i].component = component;
} else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) {
edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
edges[edgeOffset + i - 1].component = component;
} else {
throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]);
}
}
if (direction) {
edges[edgeOffset].setNext(edges[edgeOffset + length - 1]);
edges[edgeOffset].component = component;
} else {
edges[edgeOffset + length - 1].setNext(edges[edgeOffset]);
edges[edgeOffset + length - 1].component = component;
}
return edges;
}
/**
* Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range
*/
private static void translate(Coordinate[] points) {
for (Coordinate c : points) {
if (c.x < 0) {
c.x += 2*DATELINE;
}
}
}
} }
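The ring(...) and translate(...) logic above (moved here from ShapeBuilder, with the translation flag now carried by an AtomicBoolean instead of a field on the shell) shifts negative longitudes by 360 degrees when a shell crosses the dateline, so the ring stays contiguous in a (180:360) range. A minimal standalone sketch of that shift, assuming DATELINE is 180:

---------------------------------------------------------------------------
// Sketch: shift negative longitudes into the 180..360 range so a ring that
// crosses the +/-180 meridian is represented without a break. DATELINE assumed to be 180.
public class DatelineShift {

    static final double DATELINE = 180.0;

    static double[] translate(double[] longitudes) {
        double[] shifted = longitudes.clone();
        for (int i = 0; i < shifted.length; i++) {
            if (shifted[i] < 0) {
                shifted[i] += 2 * DATELINE; // e.g. -170 -> 190
            }
        }
        return shifted;
    }

    public static void main(String[] args) {
        double[] ring = {170.0, -170.0, -175.0, 175.0};
        for (double lon : translate(ring)) {
            System.out.print(lon + " "); // 170.0 190.0 185.0 175.0
        }
        System.out.println();
    }
}
---------------------------------------------------------------------------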
@ -362,150 +362,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
} }
} }
private static final int top(Coordinate[] points, int offset, int length) {
int top = 0; // we start at 1 here since top points to 0
for (int i = 1; i < length; i++) {
if (points[offset + i].y < points[offset + top].y) {
top = i;
} else if (points[offset + i].y == points[offset + top].y) {
if (points[offset + i].x < points[offset + top].x) {
top = i;
}
}
}
return top;
}
private static final double[] range(Coordinate[] points, int offset, int length) {
double minX = points[0].x;
double maxX = points[0].x;
double minY = points[0].y;
double maxY = points[0].y;
// compute the bounding coordinates (@todo: cleanup brute force)
for (int i = 1; i < length; ++i) {
if (points[offset + i].x < minX) {
minX = points[offset + i].x;
}
if (points[offset + i].x > maxX) {
maxX = points[offset + i].x;
}
if (points[offset + i].y < minY) {
minY = points[offset + i].y;
}
if (points[offset + i].y > maxY) {
maxY = points[offset + i].y;
}
}
return new double[] {minX, maxX, minY, maxY};
}
/**
* Concatenate a set of points to a polygon
*
* @param component
* component id of the polygon
* @param direction
* direction of the ring
* @param points
* list of points to concatenate
* @param pointOffset
* index of the first point
* @param edges
* Array of edges to write the result to
* @param edgeOffset
* index of the first edge in the result
* @param length
* number of points to use
* @return the edges creates
*/
private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
int length) {
assert edges.length >= length+edgeOffset;
assert points.length >= length+pointOffset;
edges[edgeOffset] = new Edge(points[pointOffset], null);
for (int i = 1; i < length; i++) {
if (direction) {
edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
edges[edgeOffset + i].component = component;
} else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) {
edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
edges[edgeOffset + i - 1].component = component;
} else {
throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]);
}
}
if (direction) {
edges[edgeOffset].setNext(edges[edgeOffset + length - 1]);
edges[edgeOffset].component = component;
} else {
edges[edgeOffset + length - 1].setNext(edges[edgeOffset]);
edges[edgeOffset + length - 1].component = component;
}
return edges;
}
/**
* Create a connected list of a list of coordinates
*
* @param points
* array of point
* @param offset
* index of the first point
* @param length
* number of points
* @return Array of edges
*/
protected static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
Coordinate[] points, int offset, Edge[] edges, int toffset, int length) {
// calculate the direction of the points:
// find the point a the top of the set and check its
// neighbors orientation. So direction is equivalent
// to clockwise/counterclockwise
final int top = top(points, offset, length);
final int prev = (offset + ((top + length - 1) % length));
final int next = (offset + ((top + 1) % length));
boolean orientation = points[offset + prev].x > points[offset + next].x;
// OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
// since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
// thus if orientation is computed as cw, the logic will translate points across dateline
// and convert to a right handed system
// compute the bounding box and calculate range
double[] range = range(points, offset, length);
final double rng = range[1] - range[0];
// translate the points if the following is true
// 1. shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
// (translation would result in a collapsed poly)
// 2. the shell of the candidate hole has been translated (to preserve the coordinate system)
boolean incorrectOrientation = component == 0 && handedness != orientation;
if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (shell.translated && component != 0)) {
translate(points);
// flip the translation bit if the shell is being translated
if (component == 0) {
shell.translated = true;
}
// correct the orientation post translation (ccw for shell, cw for holes)
if (component == 0 || (component != 0 && handedness == orientation)) {
orientation = !orientation;
}
}
return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
}
/**
     * Transforms coordinates in the western hemisphere (-180:0) to a (180:360) range
*/
protected static void translate(Coordinate[] points) {
for (Coordinate c : points) {
if (c.x < 0) {
c.x += 2*DATELINE;
}
}
}
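
translate() implements the dateline handling that ring() decides on: every negative longitude is shifted by 2 * DATELINE (360 degrees), so a shell whose longitude range exceeds a hemisphere, for example 170 to -170 across the dateline, becomes a contiguous 170..190 span instead of wrapping across the whole map. A standalone sketch of just that shift (TranslateSketch is illustrative; DATELINE mirrors the 180-degree constant used above):

---------------------------------------------------------------------------
// Illustrative sketch of ShapeBuilder.translate(): longitudes in (-180, 0)
// are mapped into the (180, 360) range so rings crossing the dateline stay
// contiguous after the right-handedness correction.
public class TranslateSketch {
    static final double DATELINE = 180;

    static void translate(double[] longitudes) {
        for (int i = 0; i < longitudes.length; i++) {
            if (longitudes[i] < 0) {
                longitudes[i] += 2 * DATELINE;
            }
        }
    }

    public static void main(String[] args) {
        double[] lons = {170, 175, -175, -170}; // ring crossing the dateline
        translate(lons);
        // prints [170.0, 175.0, 185.0, 190.0]
        System.out.println(java.util.Arrays.toString(lons));
    }
}
---------------------------------------------------------------------------
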
/** /**
* Set the intersection of this line segment to the given position * Set the intersection of this line segment to the given position
* *
@ -517,7 +373,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
return intersect = position(coordinate, next.coordinate, position); return intersect = position(coordinate, next.coordinate, position);
} }
public static Coordinate position(Coordinate p1, Coordinate p2, double position) { protected static Coordinate position(Coordinate p1, Coordinate p2, double position) {
if (position == 0) { if (position == 0) {
return p1; return p1;
} else if (position == 1) { } else if (position == 1) {
@ -542,7 +398,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
public int compare(Edge o1, Edge o2) { public int compare(Edge o1, Edge o2) {
return Double.compare(o1.intersect.y, o2.intersect.y); return Double.compare(o1.intersect.y, o2.intersect.y);
} }
} }
public static enum Orientation { public static enum Orientation {

View File

@ -19,8 +19,6 @@
package org.elasticsearch.common.lease; package org.elasticsearch.common.lease;
import org.elasticsearch.ElasticsearchException;
import java.util.Arrays; import java.util.Arrays;
/** Utility methods to work with {@link Releasable}s. */ /** Utility methods to work with {@link Releasable}s. */

View File

@ -123,7 +123,7 @@ public abstract class ExtensionPoint {
public static final class SelectedType<T> extends ClassMap<T> { public static final class SelectedType<T> extends ClassMap<T> {
public SelectedType(String name, Class<T> extensionClass) { public SelectedType(String name, Class<T> extensionClass) {
super(name, extensionClass, Collections.EMPTY_SET); super(name, extensionClass, Collections.emptySet());
} }
/** /**

View File

@ -133,7 +133,7 @@ public class NodeJoinController extends AbstractComponent {
/** utility method to fail the given election context under the cluster state thread */ /** utility method to fail the given election context under the cluster state thread */
private void failContext(final ElectionContext context, final String reason, final Throwable throwable) { private void failContext(final ElectionContext context, final String reason, final Throwable throwable) {
clusterService.submitStateUpdateTask("zen-disco-join(failure [" + reason + "])", Priority.IMMEDIATE, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("zen-disco-join(failure [" + reason + "])", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override @Override
public boolean runOnlyOnMaster() { public boolean runOnlyOnMaster() {
@ -231,7 +231,7 @@ public class NodeJoinController extends AbstractComponent {
} }
final String source = "zen-disco-join(elected_as_master, [" + pendingMasterJoins + "] joins received)"; final String source = "zen-disco-join(elected_as_master, [" + pendingMasterJoins + "] joins received)";
clusterService.submitStateUpdateTask(source, Priority.IMMEDIATE, new ProcessJoinsTask() { clusterService.submitStateUpdateTask(source, new ProcessJoinsTask(Priority.IMMEDIATE) {
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
// Take into account the previous known nodes, if they happen not to be available // Take into account the previous known nodes, if they happen not to be available
@ -280,7 +280,7 @@ public class NodeJoinController extends AbstractComponent {
/** process all pending joins */ /** process all pending joins */
private void processJoins(String reason) { private void processJoins(String reason) {
clusterService.submitStateUpdateTask("zen-disco-join(" + reason + ")", Priority.URGENT, new ProcessJoinsTask()); clusterService.submitStateUpdateTask("zen-disco-join(" + reason + ")", new ProcessJoinsTask(Priority.URGENT));
} }
@ -356,6 +356,10 @@ public class NodeJoinController extends AbstractComponent {
private final List<MembershipAction.JoinCallback> joinCallbacksToRespondTo = new ArrayList<>(); private final List<MembershipAction.JoinCallback> joinCallbacksToRespondTo = new ArrayList<>();
private boolean nodeAdded = false; private boolean nodeAdded = false;
public ProcessJoinsTask(Priority priority) {
super(priority);
}
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder; DiscoveryNodes.Builder nodesBuilder;

View File

@ -320,7 +320,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
} catch (FailedToCommitClusterStateException t) { } catch (FailedToCommitClusterStateException t) {
// cluster service logs a WARN message // cluster service logs a WARN message
logger.debug("failed to publish cluster state version [{}] (not enough nodes acknowledged, min master nodes [{}])", clusterChangedEvent.state().version(), electMaster.minimumMasterNodes()); logger.debug("failed to publish cluster state version [{}] (not enough nodes acknowledged, min master nodes [{}])", clusterChangedEvent.state().version(), electMaster.minimumMasterNodes());
clusterService.submitStateUpdateTask("zen-disco-failed-to-publish", Priority.IMMEDIATE, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("zen-disco-failed-to-publish", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
return rejoin(currentState, "failed to publish to min_master_nodes"); return rejoin(currentState, "failed to publish to min_master_nodes");
@ -498,7 +498,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return; return;
} }
if (localNodeMaster()) { if (localNodeMaster()) {
clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", Priority.IMMEDIATE, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.id()); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.id());
@ -538,7 +538,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// nothing to do here... // nothing to do here...
return; return;
} }
clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, Priority.IMMEDIATE, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
if (currentState.nodes().get(node.id()) == null) { if (currentState.nodes().get(node.id()) == null) {
@ -587,7 +587,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// We only set the new value. If the master doesn't see enough nodes it will revoke it's mastership. // We only set the new value. If the master doesn't see enough nodes it will revoke it's mastership.
return; return;
} }
clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", Priority.IMMEDIATE, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override @Override
public ClusterState execute(ClusterState currentState) { public ClusterState execute(ClusterState currentState) {
// check if we have enough master nodes, if not, we need to move into joining the cluster again // check if we have enough master nodes, if not, we need to move into joining the cluster again
@ -627,7 +627,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
logger.info("master_left [{}], reason [{}]", cause, masterNode, reason); logger.info("master_left [{}], reason [{}]", cause, masterNode, reason);
clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", Priority.IMMEDIATE, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override @Override
public boolean runOnlyOnMaster() { public boolean runOnlyOnMaster() {
@ -694,7 +694,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
} }
void processNextPendingClusterState(String reason) { void processNextPendingClusterState(String reason) {
clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", Priority.URGENT, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", new ClusterStateUpdateTask(Priority.URGENT) {
@Override @Override
public boolean runOnlyOnMaster() { public boolean runOnlyOnMaster() {
return false; return false;
@ -1059,7 +1059,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return; return;
} }
logger.debug("got a ping from another master {}. resolving who should rejoin. current ping count: [{}]", pingRequest.masterNode(), pingsWhileMaster.get()); logger.debug("got a ping from another master {}. resolving who should rejoin. current ping count: [{}]", pingRequest.masterNode(), pingsWhileMaster.get());
clusterService.submitStateUpdateTask("ping from another master", Priority.IMMEDIATE, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("ping from another master", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override @Override
public ClusterState execute(ClusterState currentState) throws Exception { public ClusterState execute(ClusterState currentState) throws Exception {
@ -1114,7 +1114,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
class RejoinClusterRequestHandler implements TransportRequestHandler<RejoinClusterRequest> { class RejoinClusterRequestHandler implements TransportRequestHandler<RejoinClusterRequest> {
@Override @Override
public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception { public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception {
clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", Priority.IMMEDIATE, new ClusterStateUpdateTask() { clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override @Override
public boolean runOnlyOnMaster() { public boolean runOnlyOnMaster() {

View File

@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*; import org.elasticsearch.transport.*;
@ -41,7 +42,7 @@ import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.new
public class NodesFaultDetection extends FaultDetection { public class NodesFaultDetection extends FaultDetection {
public static final String PING_ACTION_NAME = "internal:discovery/zen/fd/ping"; public static final String PING_ACTION_NAME = "internal:discovery/zen/fd/ping";
public abstract static class Listener { public abstract static class Listener {
public void onNodeFailure(DiscoveryNode node, String reason) {} public void onNodeFailure(DiscoveryNode node, String reason) {}
@ -145,14 +146,18 @@ public class NodesFaultDetection extends FaultDetection {
} }
private void notifyNodeFailure(final DiscoveryNode node, final String reason) { private void notifyNodeFailure(final DiscoveryNode node, final String reason) {
threadPool.generic().execute(new Runnable() { try {
@Override threadPool.generic().execute(new Runnable() {
public void run() { @Override
for (Listener listener : listeners) { public void run() {
listener.onNodeFailure(node, reason); for (Listener listener : listeners) {
listener.onNodeFailure(node, reason);
}
} }
} });
}); } catch (EsRejectedExecutionException ex) {
logger.trace("[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down", ex, node, reason);
}
} }
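
The change above wraps the listener notification in a try/catch so that a rejected execution from the generic thread pool during node shutdown is logged and ignored rather than propagated. The same pattern with plain java.util.concurrent, where RejectedExecutionException plays the role of EsRejectedExecutionException (RejectOnShutdownSketch is illustrative, not Elasticsearch code):

---------------------------------------------------------------------------
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

// Illustrative sketch of the "ignore rejections while shutting down" pattern.
public class RejectOnShutdownSketch {
    public static void main(String[] args) {
        ExecutorService generic = Executors.newSingleThreadExecutor();
        generic.shutdown(); // simulate a node that is already shutting down
        try {
            generic.execute(new Runnable() {
                @Override
                public void run() {
                    System.out.println("notifying listeners");
                }
            });
        } catch (RejectedExecutionException ex) {
            // local node is shutting down; dropping the notification is acceptable
            System.out.println("ignoring node failure notification: " + ex);
        }
    }
}
---------------------------------------------------------------------------
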
private void notifyPingReceived(final PingRequest pingRequest) { private void notifyPingReceived(final PingRequest pingRequest) {

View File

@ -58,6 +58,8 @@ public class Environment {
private final Path pluginsFile; private final Path pluginsFile;
private final Path modulesFile;
private final Path sharedDataFile; private final Path sharedDataFile;
/** location of bin/, used by plugin manager */ /** location of bin/, used by plugin manager */
@ -157,6 +159,7 @@ public class Environment {
binFile = homeFile.resolve("bin"); binFile = homeFile.resolve("bin");
libFile = homeFile.resolve("lib"); libFile = homeFile.resolve("lib");
modulesFile = homeFile.resolve("modules");
} }
/** /**
@ -275,6 +278,10 @@ public class Environment {
return libFile; return libFile;
} }
public Path modulesFile() {
return modulesFile;
}
public Path logsFile() { public Path logsFile() {
return logsFile; return logsFile;
} }

View File

@ -131,7 +131,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
if (metaData != null) { if (metaData != null) {
ShardPath shardPath = null; ShardPath shardPath = null;
try { try {
IndexSettings indexSettings = new IndexSettings(metaData, settings, Collections.EMPTY_LIST); IndexSettings indexSettings = new IndexSettings(metaData, settings, Collections.emptyList());
shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings); shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
if (shardPath == null) { if (shardPath == null) {
throw new IllegalStateException(shardId + " no shard path found"); throw new IllegalStateException(shardId + " no shard path found");

View File

@ -55,7 +55,7 @@ public final class AnalysisRegistry implements Closeable {
private final Environment environemnt; private final Environment environemnt;
public AnalysisRegistry(HunspellService hunspellService, Environment environment) { public AnalysisRegistry(HunspellService hunspellService, Environment environment) {
this(hunspellService, environment, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP); this(hunspellService, environment, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
} }
public AnalysisRegistry(HunspellService hunspellService, Environment environment, public AnalysisRegistry(HunspellService hunspellService, Environment environment,

View File

@ -19,22 +19,32 @@
package org.elasticsearch.index.fieldvisitor; package org.elasticsearch.index.fieldvisitor;
import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfo;
import org.elasticsearch.common.regex.Regex;
import java.io.IOException; import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Set; import java.util.Set;
/** /**
* A field visitor that allows to load a selection of the stored fields. * A field visitor that allows to load a selection of the stored fields by exact name or by pattern.
* Supported pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy".
* The Uid field is always loaded. * The Uid field is always loaded.
* The class is optimized for source loading as it is a common use case. * The class is optimized for source loading as it is a common use case.
*/ */
public class CustomFieldsVisitor extends FieldsVisitor { public class CustomFieldsVisitor extends FieldsVisitor {
private final Set<String> fields; private final Set<String> fields;
private final List<String> patterns;
public CustomFieldsVisitor(Set<String> fields, boolean loadSource) { public CustomFieldsVisitor(Set<String> fields, List<String> patterns, boolean loadSource) {
super(loadSource); super(loadSource);
this.fields = fields; this.fields = fields;
this.patterns = patterns;
}
public CustomFieldsVisitor(Set<String> fields, boolean loadSource) {
this(fields, Collections.emptyList(), loadSource);
} }
@Override @Override
@ -42,7 +52,14 @@ public class CustomFieldsVisitor extends FieldsVisitor {
if (super.needsField(fieldInfo) == Status.YES) { if (super.needsField(fieldInfo) == Status.YES) {
return Status.YES; return Status.YES;
} }
if (fields.contains(fieldInfo.name)) {
return fields.contains(fieldInfo.name) ? Status.YES : Status.NO; return Status.YES;
}
for (String pattern : patterns) {
if (Regex.simpleMatch(pattern, fieldInfo.name)) {
return Status.YES;
}
}
return Status.NO;
} }
} }
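
needsField() above first honours the superclass decision and the exact field-name set, and only then tries the wildcard patterns via Regex.simpleMatch (the supported styles being "xxx*", "*xxx", "*xxx*" and "xxx*yyy"). A standalone sketch of that decision order, with a plain prefix check standing in for Regex.simpleMatch (FieldMatchSketch and the example field names are illustrative):

---------------------------------------------------------------------------
import java.util.Collections;
import java.util.List;
import java.util.Set;

// Illustrative sketch of the decision order in CustomFieldsVisitor.needsField():
// exact names win, then each pattern is tried in turn. A plain prefix check
// stands in for Regex.simpleMatch and covers only the "xxx*" style.
public class FieldMatchSketch {
    static boolean needsField(Set<String> fields, List<String> patterns, String name) {
        if (fields.contains(name)) {
            return true;
        }
        for (String pattern : patterns) {
            if (pattern.endsWith("*") && name.startsWith(pattern.substring(0, pattern.length() - 1))) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Set<String> fields = Collections.singleton("title");
        List<String> patterns = Collections.singletonList("meta.*");
        System.out.println(needsField(fields, patterns, "title"));       // true: exact name
        System.out.println(needsField(fields, patterns, "meta.author")); // true: matches "meta.*"
        System.out.println(needsField(fields, patterns, "body"));        // false
    }
}
---------------------------------------------------------------------------
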

View File

@ -336,8 +336,6 @@ public class DocumentMapper implements ToXContent {
private void addMappers(Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) { private void addMappers(Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
assert mappingLock.isWriteLockedByCurrentThread(); assert mappingLock.isWriteLockedByCurrentThread();
// first ensure we don't have any incompatible new fields
mapperService.checkNewMappersCompatibility(objectMappers, fieldMappers, updateAllTypes);
// update mappers for this document type // update mappers for this document type
Map<String, ObjectMapper> builder = new HashMap<>(this.objectMappers); Map<String, ObjectMapper> builder = new HashMap<>(this.objectMappers);
@ -351,11 +349,12 @@ public class DocumentMapper implements ToXContent {
this.fieldMappers = this.fieldMappers.copyAndAllAll(fieldMappers); this.fieldMappers = this.fieldMappers.copyAndAllAll(fieldMappers);
// finally update for the entire index // finally update for the entire index
mapperService.addMappers(objectMappers, fieldMappers); mapperService.addMappers(type, objectMappers, fieldMappers);
} }
public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) { public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
try (ReleasableLock lock = mappingWriteLock.acquire()) { try (ReleasableLock lock = mappingWriteLock.acquire()) {
mapperService.checkMappersCompatibility(type, mapping, updateAllTypes);
final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes); final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes);
this.mapping.merge(mapping, mergeResult); this.mapping.merge(mapping, mergeResult);
if (simulate == false) { if (simulate == false) {

View File

@ -28,8 +28,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.core.DateFieldMapper.DateFieldType; import org.elasticsearch.index.mapper.core.DateFieldMapper.DateFieldType;
@ -47,7 +45,6 @@ import java.io.IOException;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Set; import java.util.Set;
/** A parser for documents, given mappings from a DocumentMapper */ /** A parser for documents, given mappings from a DocumentMapper */
@ -79,6 +76,10 @@ class DocumentParser implements Closeable {
} }
private ParsedDocument innerParseDocument(SourceToParse source) throws MapperParsingException { private ParsedDocument innerParseDocument(SourceToParse source) throws MapperParsingException {
if (docMapper.type().equals(MapperService.DEFAULT_MAPPING)) {
throw new IllegalArgumentException("It is forbidden to index into the default mapping [" + MapperService.DEFAULT_MAPPING + "]");
}
ParseContext.InternalParseContext context = cache.get(); ParseContext.InternalParseContext context = cache.get();
final Mapping mapping = docMapper.mapping(); final Mapping mapping = docMapper.mapping();
@ -712,37 +713,64 @@ class DocumentParser implements Closeable {
// The path of the dest field might be completely different from the current one so we need to reset it // The path of the dest field might be completely different from the current one so we need to reset it
context = context.overridePath(new ContentPath(0)); context = context.overridePath(new ContentPath(0));
String[] paths = Strings.splitStringToArray(field, '.');
String fieldName = paths[paths.length-1];
ObjectMapper mapper = context.root(); ObjectMapper mapper = context.root();
String objectPath = ""; ObjectMapper[] mappers = new ObjectMapper[paths.length-1];
String fieldPath = field; if (paths.length > 1) {
int posDot = field.lastIndexOf('.'); ObjectMapper parent = context.root();
if (posDot > 0) { for (int i = 0; i < paths.length-1; i++) {
objectPath = field.substring(0, posDot); mapper = context.docMapper().objectMappers().get(context.path().fullPathAsText(paths[i]));
context.path().add(objectPath); if (mapper == null) {
mapper = context.docMapper().objectMappers().get(objectPath); // One mapping is missing, check if we are allowed to create a dynamic one.
fieldPath = field.substring(posDot + 1); ObjectMapper.Dynamic dynamic = parent.dynamic();
if (dynamic == null) {
dynamic = dynamicOrDefault(context.root().dynamic());
}
switch (dynamic) {
case STRICT:
throw new StrictDynamicMappingException(parent.fullPath(), paths[i]);
case TRUE:
Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], "object");
if (builder == null) {
// if this is a non root object, then explicitly set the dynamic behavior if set
if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
((ObjectMapper.Builder) builder).dynamic(parent.dynamic());
}
builder = MapperBuilders.object(paths[i]).enabled(true).pathType(parent.pathType());
}
Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
mapper = (ObjectMapper) builder.build(builderContext);
if (mapper.nested() != ObjectMapper.Nested.NO) {
throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().fullPathAsText(paths[i]) + "]) through `copy_to`");
}
break;
case FALSE:
// Maybe we should log something to tell the user that the copy_to is ignored in this case.
break;
default:
throw new AssertionError("Unexpected dynamic type " + dynamic);
}
}
context.path().add(paths[i]);
mappers[i] = mapper;
parent = mapper;
}
} }
if (mapper == null) { ObjectMapper update = parseDynamicValue(context, mapper, fieldName, context.parser().currentToken());
//TODO: Create an object dynamically?
throw new MapperParsingException("attempt to copy value to non-existing object [" + field + "]");
}
ObjectMapper update = parseDynamicValue(context, mapper, fieldPath, context.parser().currentToken());
assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping
// propagate the update to the root if (paths.length > 1) {
while (objectPath.length() > 0) { for (int i = paths.length - 2; i >= 0; i--) {
String parentPath = ""; ObjectMapper parent = context.root();
ObjectMapper parent = context.root(); if (i > 0) {
posDot = objectPath.lastIndexOf('.'); parent = mappers[i-1];
if (posDot > 0) { }
parentPath = objectPath.substring(0, posDot); assert parent != null;
parent = context.docMapper().objectMappers().get(parentPath); update = parent.mappingUpdate(update);
} }
if (parent == null) {
throw new IllegalStateException("[" + objectPath + "] has no parent for path [" + parentPath + "]");
}
update = parent.mappingUpdate(update);
objectPath = parentPath;
} }
context.addDynamicMappingsUpdate(update); context.addDynamicMappingsUpdate(update);
} }
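
The rewritten copy_to handling above splits the destination field on dots, walks the path from the root, and creates any missing intermediate object mappers when dynamic mapping allows it (throwing under strict, silently skipping under false). The path walk is the core idea; here is a standalone sketch over plain nested maps, purely illustrative (CopyToPathSketch is not mapper code):

---------------------------------------------------------------------------
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch of the copy_to path walk: split "a.b.c" on dots, create
// any missing intermediate objects, then set the leaf value on the deepest one.
public class CopyToPathSketch {
    @SuppressWarnings("unchecked")
    static void copyTo(Map<String, Object> root, String field, Object value) {
        String[] paths = field.split("\\.");
        Map<String, Object> parent = root;
        for (int i = 0; i < paths.length - 1; i++) {
            Object existing = parent.get(paths[i]);
            if (existing == null) {
                // corresponds to dynamic == TRUE above: create the missing object
                existing = new LinkedHashMap<String, Object>();
                parent.put(paths[i], existing);
            }
            parent = (Map<String, Object>) existing;
        }
        parent.put(paths[paths.length - 1], value);
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new LinkedHashMap<>();
        copyTo(doc, "meta.author.name", "kimchy");
        // prints {meta={author={name=kimchy}}}
        System.out.println(doc);
    }
}
---------------------------------------------------------------------------
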

View File

@ -307,7 +307,6 @@ public abstract class FieldMapper extends Mapper {
if (ref.get().equals(fieldType()) == false) { if (ref.get().equals(fieldType()) == false) {
throw new IllegalStateException("Cannot overwrite field type reference to unequal reference"); throw new IllegalStateException("Cannot overwrite field type reference to unequal reference");
} }
ref.incrementAssociatedMappers();
this.fieldTypeRef = ref; this.fieldTypeRef = ref;
} }
@ -360,7 +359,7 @@ public abstract class FieldMapper extends Mapper {
} }
@Override @Override
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { public void merge(Mapper mergeWith, MergeResult mergeResult) {
if (!this.getClass().equals(mergeWith.getClass())) { if (!this.getClass().equals(mergeWith.getClass())) {
String mergedType = mergeWith.getClass().getSimpleName(); String mergedType = mergeWith.getClass().getSimpleName();
if (mergeWith instanceof FieldMapper) { if (mergeWith instanceof FieldMapper) {
@ -371,20 +370,6 @@ public abstract class FieldMapper extends Mapper {
return; return;
} }
FieldMapper fieldMergeWith = (FieldMapper) mergeWith; FieldMapper fieldMergeWith = (FieldMapper) mergeWith;
List<String> subConflicts = new ArrayList<>(); // TODO: just expose list from MergeResult?
fieldType().checkTypeName(fieldMergeWith.fieldType(), subConflicts);
if (subConflicts.isEmpty() == false) {
// return early if field types don't match
assert subConflicts.size() == 1;
mergeResult.addConflict(subConflicts.get(0));
return;
}
boolean strict = this.fieldTypeRef.getNumAssociatedMappers() > 1 && mergeResult.updateAllTypes() == false;
fieldType().checkCompatibility(fieldMergeWith.fieldType(), subConflicts, strict);
for (String conflict : subConflicts) {
mergeResult.addConflict(conflict);
}
multiFields.merge(mergeWith, mergeResult); multiFields.merge(mergeWith, mergeResult);
if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
@ -614,7 +599,7 @@ public abstract class FieldMapper extends Mapper {
} }
// No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge // No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge
public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { public void merge(Mapper mergeWith, MergeResult mergeResult) {
FieldMapper mergeWithMultiField = (FieldMapper) mergeWith; FieldMapper mergeWithMultiField = (FieldMapper) mergeWith;
List<FieldMapper> newFieldMappers = null; List<FieldMapper> newFieldMappers = null;

View File

@ -24,9 +24,11 @@ import org.elasticsearch.common.regex.Regex;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Objects;
import java.util.Set; import java.util.Set;
/** /**
@ -37,18 +39,49 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
/** Full field name to field type */ /** Full field name to field type */
private final CopyOnWriteHashMap<String, MappedFieldTypeReference> fullNameToFieldType; private final CopyOnWriteHashMap<String, MappedFieldTypeReference> fullNameToFieldType;
/** Full field name to types containing a mapping for this full name. */
private final CopyOnWriteHashMap<String, Set<String>> fullNameToTypes;
/** Index field name to field type */ /** Index field name to field type */
private final CopyOnWriteHashMap<String, MappedFieldTypeReference> indexNameToFieldType; private final CopyOnWriteHashMap<String, MappedFieldTypeReference> indexNameToFieldType;
/** Index field name to types containing a mapping for this index name. */
private final CopyOnWriteHashMap<String, Set<String>> indexNameToTypes;
/** Create a new empty instance. */ /** Create a new empty instance. */
public FieldTypeLookup() { public FieldTypeLookup() {
fullNameToFieldType = new CopyOnWriteHashMap<>(); fullNameToFieldType = new CopyOnWriteHashMap<>();
fullNameToTypes = new CopyOnWriteHashMap<>();
indexNameToFieldType = new CopyOnWriteHashMap<>(); indexNameToFieldType = new CopyOnWriteHashMap<>();
indexNameToTypes = new CopyOnWriteHashMap<>();
} }
private FieldTypeLookup(CopyOnWriteHashMap<String, MappedFieldTypeReference> fullName, CopyOnWriteHashMap<String, MappedFieldTypeReference> indexName) { private FieldTypeLookup(
fullNameToFieldType = fullName; CopyOnWriteHashMap<String, MappedFieldTypeReference> fullName,
indexNameToFieldType = indexName; CopyOnWriteHashMap<String, Set<String>> fullNameToTypes,
CopyOnWriteHashMap<String, MappedFieldTypeReference> indexName,
CopyOnWriteHashMap<String, Set<String>> indexNameToTypes) {
this.fullNameToFieldType = fullName;
this.fullNameToTypes = fullNameToTypes;
this.indexNameToFieldType = indexName;
this.indexNameToTypes = indexNameToTypes;
}
private static CopyOnWriteHashMap<String, Set<String>> addType(CopyOnWriteHashMap<String, Set<String>> map, String key, String type) {
Set<String> types = map.get(key);
if (types == null) {
return map.copyAndPut(key, Collections.singleton(type));
} else if (types.contains(type)) {
            // nothing to do
return map;
} else {
Set<String> newTypes = new HashSet<>(types.size() + 1);
newTypes.addAll(types);
newTypes.add(type);
assert newTypes.size() == types.size() + 1;
newTypes = Collections.unmodifiableSet(newTypes);
return map.copyAndPut(key, newTypes);
}
} }
/** /**
@ -56,9 +89,15 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
* from the provided fields. If a field already exists, the field type will be updated * from the provided fields. If a field already exists, the field type will be updated
* to use the new mappers field type. * to use the new mappers field type.
*/ */
public FieldTypeLookup copyAndAddAll(Collection<FieldMapper> newFieldMappers) { public FieldTypeLookup copyAndAddAll(String type, Collection<FieldMapper> newFieldMappers) {
Objects.requireNonNull(type, "type must not be null");
if (MapperService.DEFAULT_MAPPING.equals(type)) {
throw new IllegalArgumentException("Default mappings should not be added to the lookup");
}
CopyOnWriteHashMap<String, MappedFieldTypeReference> fullName = this.fullNameToFieldType; CopyOnWriteHashMap<String, MappedFieldTypeReference> fullName = this.fullNameToFieldType;
CopyOnWriteHashMap<String, Set<String>> fullNameToTypes = this.fullNameToTypes;
CopyOnWriteHashMap<String, MappedFieldTypeReference> indexName = this.indexNameToFieldType; CopyOnWriteHashMap<String, MappedFieldTypeReference> indexName = this.indexNameToFieldType;
CopyOnWriteHashMap<String, Set<String>> indexNameToTypes = this.indexNameToTypes;
for (FieldMapper fieldMapper : newFieldMappers) { for (FieldMapper fieldMapper : newFieldMappers) {
MappedFieldType fieldType = fieldMapper.fieldType(); MappedFieldType fieldType = fieldMapper.fieldType();
@ -86,8 +125,23 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
// this new field bridges between two existing field names (a full and index name), which we cannot support // this new field bridges between two existing field names (a full and index name), which we cannot support
throw new IllegalStateException("insane mappings found. field " + fieldType.names().fullName() + " maps across types to field " + fieldType.names().indexName()); throw new IllegalStateException("insane mappings found. field " + fieldType.names().fullName() + " maps across types to field " + fieldType.names().indexName());
} }
fullNameToTypes = addType(fullNameToTypes, fieldType.names().fullName(), type);
indexNameToTypes = addType(indexNameToTypes, fieldType.names().indexName(), type);
}
return new FieldTypeLookup(fullName, fullNameToTypes, indexName, indexNameToTypes);
}
private static boolean beStrict(String type, Set<String> types, boolean updateAllTypes) {
assert types.size() >= 1;
if (updateAllTypes) {
return false;
} else if (types.size() == 1 && types.contains(type)) {
// we are implicitly updating all types
return false;
} else {
return true;
} }
return new FieldTypeLookup(fullName, indexName);
} }
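
The new beStrict() helper above decides whether field-type compatibility must be checked strictly: leniently when updateAllTypes is set or when the field is mapped only in the type being updated, strictly when other types also map the field. A standalone sketch reproducing that decision (BeStrictSketch and the example type names are illustrative):

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

// Illustrative sketch of FieldTypeLookup.beStrict(): strict checking is skipped
// when updateAllTypes is set, or when the field is mapped only in the type
// currently being updated (an implicit "update all types").
public class BeStrictSketch {
    static boolean beStrict(String type, Set<String> typesWithField, boolean updateAllTypes) {
        if (updateAllTypes) {
            return false;
        } else if (typesWithField.size() == 1 && typesWithField.contains(type)) {
            return false;
        } else {
            return true;
        }
    }

    public static void main(String[] args) {
        Set<String> onlyThisType = Collections.singleton("blog");
        Set<String> shared = new HashSet<>(Arrays.asList("blog", "comment"));
        System.out.println(beStrict("blog", onlyThisType, false)); // false: lenient
        System.out.println(beStrict("blog", shared, false));       // true: strict
        System.out.println(beStrict("blog", shared, true));        // false: updateAllTypes wins
    }
}
---------------------------------------------------------------------------
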
/** /**
@ -95,16 +149,14 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
* If any are not compatible, an IllegalArgumentException is thrown. * If any are not compatible, an IllegalArgumentException is thrown.
* If updateAllTypes is true, only basic compatibility is checked. * If updateAllTypes is true, only basic compatibility is checked.
*/ */
public void checkCompatibility(Collection<FieldMapper> newFieldMappers, boolean updateAllTypes) { public void checkCompatibility(String type, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
for (FieldMapper fieldMapper : newFieldMappers) { for (FieldMapper fieldMapper : fieldMappers) {
MappedFieldTypeReference ref = fullNameToFieldType.get(fieldMapper.fieldType().names().fullName()); MappedFieldTypeReference ref = fullNameToFieldType.get(fieldMapper.fieldType().names().fullName());
if (ref != null) { if (ref != null) {
List<String> conflicts = new ArrayList<>(); List<String> conflicts = new ArrayList<>();
ref.get().checkTypeName(fieldMapper.fieldType(), conflicts); final Set<String> types = fullNameToTypes.get(fieldMapper.fieldType().names().fullName());
if (conflicts.isEmpty()) { // only check compat if they are the same type boolean strict = beStrict(type, types, updateAllTypes);
boolean strict = updateAllTypes == false; ref.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
ref.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
}
if (conflicts.isEmpty() == false) { if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().names().fullName() + "] conflicts with existing mapping in other types:\n" + conflicts.toString()); throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().names().fullName() + "] conflicts with existing mapping in other types:\n" + conflicts.toString());
} }
@ -114,11 +166,9 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
MappedFieldTypeReference indexNameRef = indexNameToFieldType.get(fieldMapper.fieldType().names().indexName()); MappedFieldTypeReference indexNameRef = indexNameToFieldType.get(fieldMapper.fieldType().names().indexName());
if (indexNameRef != null) { if (indexNameRef != null) {
List<String> conflicts = new ArrayList<>(); List<String> conflicts = new ArrayList<>();
indexNameRef.get().checkTypeName(fieldMapper.fieldType(), conflicts); final Set<String> types = indexNameToTypes.get(fieldMapper.fieldType().names().indexName());
if (conflicts.isEmpty()) { // only check compat if they are the same type boolean strict = beStrict(type, types, updateAllTypes);
boolean strict = updateAllTypes == false; indexNameRef.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
indexNameRef.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
}
if (conflicts.isEmpty() == false) { if (conflicts.isEmpty() == false) {
throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().names().fullName() + "] conflicts with mapping with the same index name in other types" + conflicts.toString()); throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().names().fullName() + "] conflicts with mapping with the same index name in other types" + conflicts.toString());
} }
@ -133,6 +183,15 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
return ref.get(); return ref.get();
} }
/** Get the set of types that have a mapping for the given field. */
public Set<String> getTypes(String field) {
Set<String> types = fullNameToTypes.get(field);
if (types == null) {
types = Collections.emptySet();
}
return types;
}
/** Returns the field type for the given index name */ /** Returns the field type for the given index name */
public MappedFieldType getByIndexName(String field) { public MappedFieldType getByIndexName(String field) {
MappedFieldTypeReference ref = indexNameToFieldType.get(field); MappedFieldTypeReference ref = indexNameToFieldType.get(field);
@ -140,6 +199,15 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
return ref.get(); return ref.get();
} }
/** Get the set of types that have a mapping for the given field. */
public Set<String> getTypesByIndexName(String field) {
Set<String> types = indexNameToTypes.get(field);
if (types == null) {
types = Collections.emptySet();
}
return types;
}
/** /**
* Returns a list of the index names of a simple match regex like pattern against full name and index name. * Returns a list of the index names of a simple match regex like pattern against full name and index name.
*/ */

View File

@ -229,9 +229,9 @@ public abstract class MappedFieldType extends FieldType {
public abstract String typeName(); public abstract String typeName();
/** Checks this type is the same type as other. Adds a conflict if they are different. */ /** Checks this type is the same type as other. Adds a conflict if they are different. */
public final void checkTypeName(MappedFieldType other, List<String> conflicts) { private final void checkTypeName(MappedFieldType other) {
if (typeName().equals(other.typeName()) == false) { if (typeName().equals(other.typeName()) == false) {
conflicts.add("mapper [" + names().fullName() + "] cannot be changed from type [" + typeName() + "] to [" + other.typeName() + "]"); throw new IllegalArgumentException("mapper [" + names().fullName() + "] cannot be changed from type [" + typeName() + "] to [" + other.typeName() + "]");
} else if (getClass() != other.getClass()) { } else if (getClass() != other.getClass()) {
throw new IllegalStateException("Type names equal for class " + getClass().getSimpleName() + " and " + other.getClass().getSimpleName()); throw new IllegalStateException("Type names equal for class " + getClass().getSimpleName() + " and " + other.getClass().getSimpleName());
} }
@ -243,6 +243,8 @@ public abstract class MappedFieldType extends FieldType {
* Otherwise, only properties which must never change in an index are checked. * Otherwise, only properties which must never change in an index are checked.
*/ */
public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) { public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) {
checkTypeName(other);
boolean indexed = indexOptions() != IndexOptions.NONE; boolean indexed = indexOptions() != IndexOptions.NONE;
boolean mergeWithIndexed = other.indexOptions() != IndexOptions.NONE; boolean mergeWithIndexed = other.indexOptions() != IndexOptions.NONE;
// TODO: should be validating if index options go "up" (but "down" is ok) // TODO: should be validating if index options go "up" (but "down" is ok)

View File

@ -23,12 +23,10 @@ package org.elasticsearch.index.mapper;
*/ */
public class MappedFieldTypeReference { public class MappedFieldTypeReference {
private MappedFieldType fieldType; // the current field type this reference points to private MappedFieldType fieldType; // the current field type this reference points to
private int numAssociatedMappers;
public MappedFieldTypeReference(MappedFieldType fieldType) { public MappedFieldTypeReference(MappedFieldType fieldType) {
fieldType.freeze(); // ensure frozen fieldType.freeze(); // ensure frozen
this.fieldType = fieldType; this.fieldType = fieldType;
this.numAssociatedMappers = 1;
} }
public MappedFieldType get() { public MappedFieldType get() {
@ -40,11 +38,4 @@ public class MappedFieldTypeReference {
this.fieldType = fieldType; this.fieldType = fieldType;
} }
public int getNumAssociatedMappers() {
return numAssociatedMappers;
}
public void incrementAssociatedMappers() {
++numAssociatedMappers;
}
} }

Some files were not shown because too many files have changed in this diff.