Merge branch 'master' into standalone_tests_intellij
Commit: fa1c708ccd

.editorconfig | 10 (new file)
@@ -0,0 +1,10 @@
# EditorConfig: http://editorconfig.org/

root = true

[*.java]
charset = utf-8
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true
@@ -149,17 +149,23 @@ gradle test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix)

=== Load balancing and caches.

By default, the tests run sequentially on a single forked JVM.

To run with more forked JVMs than the default use:
By default the tests run on up to 4 JVMs based on the number of cores. If you
want to explicitly specify the number of JVMs you can do so on the command
line:

----------------------------
gradle test -Dtests.jvms=8
----------------------------

Don't count hypercores for CPU-intense tests and leave some slack
for JVM-internal threads (like the garbage collector). Make sure there is
enough RAM to handle child JVMs.
Or in `~/.gradle/gradle.properties`:

----------------------------
systemProp.tests.jvms=8
----------------------------

It's difficult to pick the "right" number here. Hypercores don't count for CPU
intensive tests and you should leave some slack for JVM-internal threads like
the garbage collector. And you have to have enough RAM to handle each JVM.

=== Test compatibility.

@@ -280,11 +286,20 @@ The REST layer is tested through specific tests that are shared between all
the elasticsearch official clients and consist of YAML files that describe the
operations to be executed and the obtained results that need to be tested.

The REST tests are run automatically when executing the maven test command. To run only the
The REST tests are run automatically when executing the "gradle check" command. To run only the
REST tests use the following command:

---------------------------------------------------------------------------
gradle integTest -Dtests.filter="@Rest"
gradle :distribution:tar:integTest \
  -Dtests.class=org.elasticsearch.test.rest.RestIT
---------------------------------------------------------------------------

A specific test case can be run with

---------------------------------------------------------------------------
gradle :distribution:tar:integTest \
  -Dtests.class=org.elasticsearch.test.rest.RestIT \
  -Dtests.method="test {p0=cat.shards/10_basic/Help}"
---------------------------------------------------------------------------

`RestNIT` are the executable test classes that run all the
build.gradle | 18

@@ -45,7 +45,7 @@ subprojects {
}
}
}
}
}
extraArchive {
javadoc = true
tests = false
@@ -86,8 +86,8 @@ subprojects {
tasks.withType(Jar) {
into('META-INF') {
from project.rootProject.rootDir
include 'LICENSE.txt'
include 'NOTICE.txt'
include 'LICENSE.txt'
include 'NOTICE.txt'
}
}
// ignore missing javadocs
@@ -101,12 +101,19 @@ subprojects {
}
}

/* Sets up the dependencies that we build as part of this project but
register as though they were external to resolve internally. We register
them as external dependencies so the build plugin that we use can be used
to build elasticsearch plugins outside of the elasticsearch source tree. */
ext.projectSubstitutions = [
"org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
"org.elasticsearch:elasticsearch:${version}": ':core',
"org.elasticsearch:test-framework:${version}": ':test-framework',
"org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
"org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
"org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar'
"org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
"org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
"org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
]
configurations.all {
resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->
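The hunk is truncated inside the `dependencySubstitution` block, so the actual substitution calls are not shown. Assuming the standard Gradle `DependencySubstitutions` API, the remainder presumably iterates `projectSubstitutions` and registers each mapping, roughly like the following sketch (the closure parameter names are illustrative, not taken from the diff):

---------------------------------------------------------------------------
// Hedged sketch only: how the truncated block likely wires projectSubstitutions
// into Gradle's dependency-substitution API.
configurations.all {
    resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->
        projectSubstitutions.each { moduleCoordinate, projectPath ->
            // replace the external module with the project built in this source tree
            subs.substitute(subs.module(moduleCoordinate)).with(subs.project(projectPath))
        }
    }
}
---------------------------------------------------------------------------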
@@ -232,7 +239,7 @@ class Run extends DefaultTask {
)
public void setDebug(boolean enabled) {
project.project(':distribution').run.clusterConfig.debug = enabled
}
}
}
task run(type: Run) {
dependsOn ':distribution:run'
@@ -240,4 +247,3 @@ task run(type: Run) {
group = 'Verification'
impliesSubProjects = true
}
@@ -80,3 +80,13 @@ eclipse {
defaultOutputDir = new File(file('build'), 'eclipse')
}
}

task copyEclipseSettings(type: Copy) {
from project.file('src/main/resources/eclipse.settings')
into '.settings'
}
// otherwise .settings is not nuked entirely
tasks.cleanEclipse {
delete '.settings'
}
tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)
@@ -1,5 +1,6 @@
package com.carrotsearch.gradle.junit4

import org.gradle.api.tasks.Input
import org.gradle.util.ConfigureUtil

class TestLoggingConfiguration {
@@ -20,6 +21,10 @@ class TestLoggingConfiguration {
SlowTestsConfiguration slowTests = new SlowTestsConfiguration()
StackTraceFiltersConfiguration stackTraceFilters = new StackTraceFiltersConfiguration()

/** Summarize the first N failures at the end of the test. */
@Input
int showNumFailuresAtEnd = 3 // match TextReport default

void slowTests(Closure closure) {
ConfigureUtil.configure(closure, slowTests)
}
@@ -31,4 +36,8 @@ class TestLoggingConfiguration {
void outputMode(String mode) {
outputMode = mode.toUpperCase() as OutputMode
}

void showNumFailuresAtEnd(int n) {
showNumFailuresAtEnd = n
}
}
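For context, the new `showNumFailuresAtEnd` knob is intended to be set from a build script's `testLogging` block. A minimal usage sketch, assuming the randomized-testing test task exposes this configuration (the value 25 matches what BuildPlugin sets further down in this commit):

---------------------------------------------------------------------------
// Minimal usage sketch for the new option.
test {
    testLogging {
        showNumFailuresAtEnd 25   // summarize at most the first 25 failures when the test JVMs quit
    }
}
---------------------------------------------------------------------------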
@@ -48,9 +48,6 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
/** Format line for JVM ID string. */
String jvmIdFormat

/** Summarize the first N failures at the end. */
int showNumFailuresAtEnd = 3

/** Output stream that logs messages to the given logger */
LoggingOutputStream outStream
LoggingOutputStream errStream
@@ -110,13 +107,13 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv

@Subscribe
void onQuit(AggregatedQuitEvent e) throws IOException {
if (showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) {
if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) {
List<Description> sublist = this.failedTests
StringBuilder b = new StringBuilder()
b.append('Tests with failures')
if (sublist.size() > showNumFailuresAtEnd) {
sublist = sublist.subList(0, showNumFailuresAtEnd)
b.append(" (first " + showNumFailuresAtEnd + " out of " + failedTests.size() + ")")
if (sublist.size() > config.showNumFailuresAtEnd) {
sublist = sublist.subList(0, config.showNumFailuresAtEnd)
b.append(" (first " + config.showNumFailuresAtEnd + " out of " + failedTests.size() + ")")
}
b.append(':\n')
for (Description description : sublist) {
@@ -62,7 +62,7 @@ class BuildPlugin implements Plugin<Project> {
configureCompile(project)

configureTest(project)
PrecommitTasks.configure(project)
configurePrecommit(project)
}

/** Performs checks on the build environment and prints information about the build environment. */
@@ -283,6 +283,7 @@ class BuildPlugin implements Plugin<Project> {

/** Adds compiler settings to the project */
static void configureCompile(Project project) {
project.ext.compactProfile = 'compact3'
project.afterEvaluate {
// fail on all javac warnings
project.tasks.withType(JavaCompile) {
@@ -295,6 +296,11 @@ class BuildPlugin implements Plugin<Project> {
*/
// don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
options.compilerArgs << '-Werror' << '-Xlint:all,-path' << '-Xdoclint:all' << '-Xdoclint:-missing'
// compile with compact 3 profile by default
// NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
if (project.compactProfile != 'full') {
options.compilerArgs << '-profile' << project.compactProfile
}
options.encoding = 'UTF-8'
}
}
@@ -365,6 +371,7 @@ class BuildPlugin implements Plugin<Project> {
enableSystemAssertions false

testLogging {
showNumFailuresAtEnd 25
slowTests {
heartbeat 10
summarySize 5
@@ -409,4 +416,11 @@ class BuildPlugin implements Plugin<Project> {
}
return test
}

private static configurePrecommit(Project project) {
Task precommit = PrecommitTasks.create(project, true)
project.check.dependsOn(precommit)
project.test.mustRunAfter(precommit)
project.dependencyLicenses.dependencies = project.configurations.runtime - project.configurations.provided
}
}
@@ -23,40 +23,41 @@ import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip

/**
* Encapsulates build configuration for an Elasticsearch plugin.
*/
class PluginBuildPlugin extends BuildPlugin {
public class PluginBuildPlugin extends BuildPlugin {

@Override
void apply(Project project) {
public void apply(Project project) {
super.apply(project)
configureDependencies(project)
// this afterEvaluate must happen before the afterEvaluate added by integTest configure,
// this afterEvaluate must happen before the afterEvaluate added by integTest creation,
// so that the file name resolution for installing the plugin will be setup
project.afterEvaluate {
String name = project.pluginProperties.extension.name
project.jar.baseName = name
project.bundlePlugin.baseName = name

project.integTest.dependsOn(project.bundlePlugin)
project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
project.tasks.run.dependsOn(project.bundlePlugin)
project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
}
RestIntegTestTask.configure(project)
RunTask.configure(project)
Task bundle = configureBundleTask(project)
project.configurations.archives.artifacts.removeAll { it.archiveTask.is project.jar }
project.configurations.getByName('default').extendsFrom = []
project.artifacts {
archives bundle
'default' bundle
if (project.path.startsWith(':modules:')) {
project.integTest.clusterConfig.module(project)
project.tasks.run.clusterConfig.module(project)
} else {
project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
}
}
createIntegTestTask(project)
createBundleTask(project)
project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
}

static void configureDependencies(Project project) {
private static void configureDependencies(Project project) {
project.dependencies {
provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
testCompile "org.elasticsearch:test-framework:${project.versions.elasticsearch}"
@@ -72,21 +73,36 @@ class PluginBuildPlugin extends BuildPlugin {
}
}

static Task configureBundleTask(Project project) {
PluginPropertiesTask buildProperties = project.tasks.create(name: 'pluginProperties', type: PluginPropertiesTask)
File pluginMetadata = project.file("src/main/plugin-metadata")
project.sourceSets.test {
output.dir(buildProperties.generatedResourcesDir, builtBy: 'pluginProperties')
resources {
srcDir pluginMetadata
}
}
Task bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [project.jar, buildProperties])
bundle.configure {
from buildProperties
from pluginMetadata
from project.jar
from bundle.project.configurations.runtime - bundle.project.configurations.provided
/** Adds an integTest task which runs rest tests */
private static void createIntegTestTask(Project project) {
RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
integTest.mustRunAfter(project.precommit, project.test)
project.check.dependsOn(integTest)
}

/**
* Adds a bundlePlugin task which builds the zip containing the plugin jars,
* metadata, properties, and packaging files
*/
private static void createBundleTask(Project project) {
File pluginMetadata = project.file('src/main/plugin-metadata')

// create a task to build the properties file for this plugin
PluginPropertiesTask buildProperties = project.tasks.create('pluginProperties', PluginPropertiesTask.class)

// add the plugin properties and metadata to test resources, so unit tests can
// know about the plugin (used by test security code to statically initialize the plugin in unit tests)
SourceSet testSourceSet = project.sourceSets.test
testSourceSet.output.dir(buildProperties.generatedResourcesDir, builtBy: 'pluginProperties')
testSourceSet.resources.srcDir(pluginMetadata)

// create the actual bundle task, which zips up all the files for the plugin
Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [project.jar, buildProperties]) {
from buildProperties // plugin properties file
from pluginMetadata // metadata (eg custom security policy)
from project.jar // this plugin's jar
from project.configurations.runtime - project.configurations.provided // the dep jars
// extra files for the plugin to go into the zip
from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging
from('src/main') {
include 'config/**'
@@ -97,6 +113,13 @@ class PluginBuildPlugin extends BuildPlugin {
}
}
project.assemble.dependsOn(bundle)
return bundle

// remove jar from the archives (things that will be published), and set it to the zip
project.configurations.archives.artifacts.removeAll { it.archiveTask.is project.jar }
project.artifacts.add('archives', bundle)

// also make the zip the default artifact (used when depending on this project)
project.configurations.getByName('default').extendsFrom = []
project.artifacts.add('default', bundle)
}
}
@@ -18,64 +18,104 @@
*/
package org.elasticsearch.gradle.precommit

import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.*
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputDirectory
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.StopActionException
import org.gradle.api.tasks.TaskAction
import org.gradle.api.tasks.VerificationTask

import java.nio.file.Files
import java.security.MessageDigest
import java.util.regex.Matcher
import java.util.regex.Pattern

class DependencyLicensesTask extends DefaultTask {
/**
* A task to check licenses for dependencies.
*
* There are two parts to the check:
* <ul>
* <li>LICENSE and NOTICE files</li>
* <li>SHA checksums for each dependency jar</li>
* </ul>
*
* The directory to find the license and sha files in defaults to the dir {@code licenses}
* in the project directory for this task. You can override this directory:
* <pre>
* dependencyLicenses {
* licensesDir = project.file('mybetterlicensedir')
* }
* </pre>
*
* The jar files to check default to the dependencies from the default configuration. You
* can override this, for example, to only check compile dependencies:
* <pre>
* dependencyLicenses {
* dependencies = project.configurations.compile
* }
* </pre>
*
* Every jar must have a {@code .sha1} file in the licenses dir. These can be managed
* automatically using the {@code updateShas} helper task that is created along
* with this task. It will add {@code .sha1} files for new jars that are in dependencies
* and remove old {@code .sha1} files that are no longer needed.
*
* Every jar must also have a LICENSE and NOTICE file. However, multiple jars can share
* LICENSE and NOTICE files by mapping a pattern to the same name.
* <pre>
* dependencyLicenses {
* mapping from: /lucene-.*/, to: 'lucene'
* }
* </pre>
*/
public class DependencyLicensesTask extends DefaultTask {
static final String SHA_EXTENSION = '.sha1'

static Task configure(Project project, Closure closure) {
DependencyLicensesTask task = project.tasks.create(type: DependencyLicensesTask, name: 'dependencyLicenses')
UpdateShasTask update = project.tasks.create(type: UpdateShasTask, name: 'updateShas')
update.parentTask = task
task.configure(closure)
project.check.dependsOn(task)
return task
}

// TODO: we should be able to default this to eg compile deps, but we need to move the licenses
// check from distribution to core (ie this should only be run on java projects)
/** A collection of jar files that should be checked. */
@InputFiles
FileCollection dependencies
public FileCollection dependencies

/** The directory to find the license and sha files in. */
@InputDirectory
File licensesDir = new File(project.projectDir, 'licenses')
public File licensesDir = new File(project.projectDir, 'licenses')

LinkedHashMap<String, String> mappings = new LinkedHashMap<>()
/** A map of patterns to prefix, used to find the LICENSE and NOTICE file. */
private LinkedHashMap<String, String> mappings = new LinkedHashMap<>()

/**
* Add a mapping from a regex pattern for the jar name, to a prefix to find
* the LICENSE and NOTICE file for that jar.
*/
@Input
void mapping(Map<String, String> props) {
String from = props.get('from')
public void mapping(Map<String, String> props) {
String from = props.remove('from')
if (from == null) {
throw new InvalidUserDataException('Missing "from" setting for license name mapping')
}
String to = props.get('to')
String to = props.remove('to')
if (to == null) {
throw new InvalidUserDataException('Missing "to" setting for license name mapping')
}
if (props.isEmpty() == false) {
throw new InvalidUserDataException("Unknown properties for mapping on dependencyLicenses: ${props.keySet()}")
}
mappings.put(from, to)
}

@TaskAction
void checkDependencies() {
// TODO: empty license dir (or error when dir exists and no deps)
if (licensesDir.exists() == false && dependencies.isEmpty() == false) {
public void checkDependencies() {
if (dependencies.isEmpty()) {
if (licensesDir.exists()) {
throw new GradleException("Licenses dir ${licensesDir} exists, but there are no dependencies")
}
return // no dependencies to check
} else if (licensesDir.exists() == false) {
throw new GradleException("Licences dir ${licensesDir} does not exist, but there are dependencies")
}

// order is the same for keys and values iteration since we use a linked hashmap
List<String> mapped = new ArrayList<>(mappings.values())
Pattern mappingsPattern = Pattern.compile('(' + mappings.keySet().join(')|(') + ')')
@@ -127,7 +167,7 @@ class DependencyLicensesTask extends DefaultTask {
}
}

void checkSha(File jar, String jarName, Set<File> shaFiles) {
private void checkSha(File jar, String jarName, Set<File> shaFiles) {
File shaFile = new File(licensesDir, jarName + SHA_EXTENSION)
if (shaFile.exists() == false) {
throw new GradleException("Missing SHA for ${jarName}. Run 'gradle updateSHAs' to create")
@@ -143,7 +183,7 @@ class DependencyLicensesTask extends DefaultTask {
shaFiles.remove(shaFile)
}

void checkFile(String name, String jarName, Map<String, Integer> counters, String type) {
private void checkFile(String name, String jarName, Map<String, Integer> counters, String type) {
String fileName = "${name}-${type}"
Integer count = counters.get(fileName)
if (count == null) {
@@ -158,10 +198,12 @@ class DependencyLicensesTask extends DefaultTask {
counters.put(fileName, count + 1)
}

static class UpdateShasTask extends DefaultTask {
DependencyLicensesTask parentTask
/** A helper task to update the sha files in the license dir. */
public static class UpdateShasTask extends DefaultTask {
private DependencyLicensesTask parentTask

@TaskAction
void updateShas() {
public void updateShas() {
Set<File> shaFiles = new HashSet<File>()
parentTask.licensesDir.eachFile {
String name = it.getName()
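Pulling the javadoc snippets above together, a typical `dependencyLicenses` configuration in a consuming build script might look like the following sketch (the combination is illustrative; the runtime-minus-provided expression mirrors what BuildPlugin assigns in this same commit):

---------------------------------------------------------------------------
// Sketch combining the options documented in the javadoc above.
// 'licenses' is the default directory; the mapping lets all lucene-* jars
// share one LICENSE/NOTICE pair under the 'lucene' prefix.
dependencyLicenses {
    licensesDir = project.file('licenses')
    dependencies = project.configurations.runtime - project.configurations.provided
    mapping from: /lucene-.*/, to: 'lucene'
}
---------------------------------------------------------------------------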
@@ -19,10 +19,11 @@
package org.elasticsearch.gradle.precommit

import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile
import org.gradle.api.tasks.OutputFiles
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.TaskAction
import org.gradle.api.tasks.util.PatternFilterable
@@ -33,14 +34,19 @@ import java.util.regex.Pattern

/**
* Checks for patterns in source files for the project which are forbidden.
*/
class ForbiddenPatternsTask extends DefaultTask {
Map<String,String> patterns = new LinkedHashMap<>()
PatternFilterable filesFilter = new PatternSet()
public class ForbiddenPatternsTask extends DefaultTask {

/** The rules: a map from the rule name, to a rule regex pattern. */
private Map<String,String> patterns = new LinkedHashMap<>()
/** A pattern set of which files should be checked. */
private PatternFilterable filesFilter = new PatternSet()

@OutputFile
File outputMarker = new File(project.buildDir, "markers/forbiddenPatterns")

ForbiddenPatternsTask() {
public ForbiddenPatternsTask() {
description = 'Checks source files for invalid patterns like nocommits or tabs'

// we always include all source files, and exclude what should not be checked
filesFilter.include('**')
// exclude known binary extensions
@@ -52,23 +58,28 @@ class ForbiddenPatternsTask extends DefaultTask {
filesFilter.exclude('**/*.crt')
filesFilter.exclude('**/*.png')

// TODO: add compile and test compile outputs as this tasks outputs, so we don't rerun when source files haven't changed
// add mandatory rules
patterns.put('nocommit', /nocommit/)
patterns.put('tab', /\t/)
}

/** Adds a file glob pattern to be excluded */
void exclude(String... excludes) {
public void exclude(String... excludes) {
this.filesFilter.exclude(excludes)
}

/** Adds pattern to forbid */
/** Adds a pattern to forbid. */
void rule(Map<String,String> props) {
String name = props.get('name')
String name = props.remove('name')
if (name == null) {
throw new IllegalArgumentException('Missing [name] for invalid pattern rule')
throw new InvalidUserDataException('Missing [name] for invalid pattern rule')
}
String pattern = props.get('pattern')
String pattern = props.remove('pattern')
if (pattern == null) {
throw new IllegalArgumentException('Missing [pattern] for invalid pattern rule')
throw new InvalidUserDataException('Missing [pattern] for invalid pattern rule')
}
if (props.isEmpty() == false) {
throw new InvalidUserDataException("Unknown arguments for ForbiddenPatterns rule mapping: ${props.keySet()}")
}
// TODO: fail if pattern contains a newline, it won't work (currently)
patterns.put(name, pattern)
@@ -89,14 +100,14 @@ class ForbiddenPatternsTask extends DefaultTask {
Pattern allPatterns = Pattern.compile('(' + patterns.values().join(')|(') + ')')
List<String> failures = new ArrayList<>()
for (File f : files()) {
f.eachLine('UTF-8') { line, lineNumber ->
f.eachLine('UTF-8') { String line, int lineNumber ->
if (allPatterns.matcher(line).find()) {
addErrorMessages(failures, f, (String)line, (int)lineNumber)
addErrorMessages(failures, f, line, lineNumber)
}
}
}
if (failures.isEmpty() == false) {
throw new IllegalArgumentException('Found invalid patterns:\n' + failures.join('\n'))
throw new GradleException('Found invalid patterns:\n' + failures.join('\n'))
}
outputMarker.setText('done', 'UTF-8')
}
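Since PrecommitTasks now registers this task under the name `forbiddenPatterns`, a build script can add project-specific rules on top of the mandatory `nocommit` and `tab` rules set in the constructor. A small hedged sketch (the extra rule and exclude below are made-up examples, not part of this commit):

---------------------------------------------------------------------------
// Illustrative configuration of the task created by PrecommitTasks.
forbiddenPatterns {
    rule name: 'system-out', pattern: /System\.out\.print/   // hypothetical extra rule
    exclude '**/*.json'                                       // skip files where the pattern is expected
}
---------------------------------------------------------------------------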
@@ -0,0 +1,62 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.gradle.precommit

import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFile
import org.gradle.api.tasks.OutputFile

/**
* Runs CheckJarHell on a classpath.
*/
public class JarHellTask extends LoggedExec {

/**
* We use a simple "marker" file that we touch when the task succeeds
* as the task output. This is compared against the modified time of the
* inputs (ie the jars/class files).
*/
@OutputFile
public File successMarker = new File(project.buildDir, 'markers/jarHell')

/** The classpath to run jarhell check on, defaults to the test runtime classpath */
@InputFile
public FileCollection classpath = project.sourceSets.test.runtimeClasspath

public JarHellTask() {
project.afterEvaluate {
dependsOn(classpath)
description = "Runs CheckJarHell on ${classpath}"
executable = new File(project.javaHome, 'bin/java')
doFirst({
/* JarHell doesn't like getting directories that don't exist but
gradle isn't especially careful about that. So we have to
filter it ourselves. */
FileCollection taskClasspath = classpath.filter { it.exists() }
args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell')
})
doLast({
successMarker.parentFile.mkdirs()
successMarker.setText("", 'UTF-8')
})
}
}
}
@@ -18,16 +18,10 @@
*/
package org.elasticsearch.gradle.precommit

import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApisExtension
import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
import org.gradle.api.GradleException
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.file.FileCollection
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.api.tasks.Exec
import org.gradle.api.tasks.TaskContainer

/**
* Validation tasks which should be run before committing. These run before tests.
@@ -35,36 +29,34 @@ import org.gradle.api.tasks.TaskContainer
class PrecommitTasks {

/** Adds a precommit task, which depends on non-test verification tasks. */
static void configure(Project project) {
List precommitTasks = [
configureForbiddenApis(project),
configureForbiddenPatterns(project.tasks),
configureJarHell(project)]
public static Task create(Project project, boolean includeDependencyLicenses) {

Map precommitOptions = [
name: 'precommit',
group: JavaBasePlugin.VERIFICATION_GROUP,
description: 'Runs all non-test checks.',
dependsOn: precommitTasks
]
Task precommit = project.tasks.create(precommitOptions)
project.check.dependsOn(precommit)
List<Task> precommitTasks = [
configureForbiddenApis(project),
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('jarHell', JarHellTask.class)]

// delay ordering relative to test tasks, since they may not be setup yet
project.afterEvaluate {
Task test = project.tasks.findByName('test')
if (test != null) {
test.mustRunAfter(precommit)
}
Task integTest = project.tasks.findByName('integTest')
if (integTest != null) {
integTest.mustRunAfter(precommit)
}
// tasks with just tests don't need dependency licenses, so this flag makes adding
// the task optional
if (includeDependencyLicenses) {
DependencyLicensesTask dependencyLicenses = project.tasks.create('dependencyLicenses', DependencyLicensesTask.class)
precommitTasks.add(dependencyLicenses)
// we also create the updateShas helper task that is associated with dependencyLicenses
UpdateShasTask updateShas = project.tasks.create('updateShas', UpdateShasTask.class)
updateShas.parentTask = dependencyLicenses
}

Map<String, Object> precommitOptions = [
name: 'precommit',
group: JavaBasePlugin.VERIFICATION_GROUP,
description: 'Runs all non-test checks.',
dependsOn: precommitTasks
]
return project.tasks.create(precommitOptions)
}

static Task configureForbiddenApis(Project project) {
project.pluginManager.apply('de.thetaphi.forbiddenapis')
private static Task configureForbiddenApis(Project project) {
project.pluginManager.apply(ForbiddenApisPlugin.class)
project.forbiddenApis {
internalRuntimeForbidden = true
failOnUnsupportedJava = false
@@ -75,72 +67,18 @@ class PrecommitTasks {
Task mainForbidden = project.tasks.findByName('forbiddenApisMain')
if (mainForbidden != null) {
mainForbidden.configure {
bundledSignatures += ['jdk-system-out']
signaturesURLs += [
getClass().getResource('/forbidden/core-signatures.txt'),
getClass().getResource('/forbidden/third-party-signatures.txt')]
bundledSignatures += 'jdk-system-out'
signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt')
}
}
Task testForbidden = project.tasks.findByName('forbiddenApisTest')
if (testForbidden != null) {
testForbidden.configure {
signaturesURLs += [getClass().getResource('/forbidden/test-signatures.txt')]
signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt')
}
}
Task forbiddenApis = project.tasks.findByName('forbiddenApis')
forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
return forbiddenApis
}

static Task configureForbiddenPatterns(TaskContainer tasks) {
Map options = [
name: 'forbiddenPatterns',
type: ForbiddenPatternsTask,
description: 'Checks source files for invalid patterns like nocommits or tabs',
]
return tasks.create(options) {
rule name: 'nocommit', pattern: /nocommit/
rule name: 'tab', pattern: /\t/
}
}

/**
* Adds a task to run jar hell on the test classpath.
*
* We use a simple "marker" file that we touch when the task succeeds
* as the task output. This is compared against the modified time of the
* inputs (ie the jars/class files).
*/
static Task configureJarHell(Project project) {
File successMarker = new File(project.buildDir, 'markers/jarHell')
Exec task = project.tasks.create(name: 'jarHell', type: Exec)
FileCollection testClasspath = project.sourceSets.test.runtimeClasspath
task.dependsOn(testClasspath)
task.inputs.files(testClasspath)
task.outputs.file(successMarker)
task.executable = new File(project.javaHome, 'bin/java')
task.doFirst({
/* JarHell doesn't like getting directories that don't exist but
gradle isn't especially careful about that. So we have to
filter it ourselves. */
def taskClasspath = testClasspath.filter { it.exists() }
task.args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell')
})
if (task.logger.isInfoEnabled() == false) {
task.standardOutput = new ByteArrayOutputStream()
task.errorOutput = task.standardOutput
task.ignoreExitValue = true
task.doLast({
if (execResult.exitValue != 0) {
logger.error(standardOutput.toString())
throw new GradleException("JarHell failed")
}
})
}
task.doLast({
successMarker.parentFile.mkdirs()
successMarker.setText("", 'UTF-8')
})
return task
}
}
@@ -0,0 +1,66 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.gradle.precommit

import org.gradle.api.DefaultTask
import org.gradle.api.tasks.TaskAction

import java.nio.file.Files
import java.security.MessageDigest

/**
* A task to update shas used by {@code DependencyLicensesCheck}
*/
public class UpdateShasTask extends DefaultTask {

/** The parent dependency licenses task to use configuration from */
public DependencyLicensesTask parentTask

public UpdateShasTask() {
description = 'Updates the sha files for the dependencyLicenses check'
onlyIf { parentTask.licensesDir.exists() }
}

@TaskAction
public void updateShas() {
Set<File> shaFiles = new HashSet<File>()
parentTask.licensesDir.eachFile {
String name = it.getName()
if (name.endsWith(DependencyLicensesTask.SHA_EXTENSION)) {
shaFiles.add(it)
}
}
for (File dependency : parentTask.dependencies) {
String jarName = dependency.getName()
File shaFile = new File(parentTask.licensesDir, jarName + DependencyLicensesTask.SHA_EXTENSION)
if (shaFile.exists() == false) {
logger.lifecycle("Adding sha for ${jarName}")
String sha = MessageDigest.getInstance("SHA-1").digest(dependency.getBytes()).encodeHex().toString()
shaFile.setText(sha, 'UTF-8')
} else {
shaFiles.remove(shaFile)
}
}
shaFiles.each { shaFile ->
logger.lifecycle("Removing unused sha ${shaFile.getName()}")
Files.delete(shaFile.toPath())
}
}
}
@@ -27,7 +27,7 @@ import org.gradle.api.tasks.Input
class ClusterConfiguration {

@Input
String distribution = 'zip'
String distribution = 'integ-test-zip'

@Input
int numNodes = 1
@@ -71,6 +71,8 @@ class ClusterConfiguration {

LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()

List<Project> modules = new ArrayList<>()

LinkedHashMap<String, Object[]> setupCommands = new LinkedHashMap<>()

@Input
@@ -93,6 +95,12 @@ class ClusterConfiguration {
plugins.put(name, pluginProject)
}

/** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */
@Input
void module(Project moduleProject) {
modules.add(moduleProject)
}

@Input
void setupCommand(String name, Object... args) {
setupCommands.put(name, args)
@@ -27,9 +27,7 @@ import org.gradle.api.*
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import org.gradle.api.logging.Logger
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec
import org.gradle.api.tasks.*

import java.nio.file.Paths

@@ -62,7 +60,12 @@ class ClusterFormationTasks {
/** Adds a dependency on the given distribution */
static void configureDistributionDependency(Project project, String distro) {
String elasticsearchVersion = VersionProperties.elasticsearch
String packaging = distro == 'tar' ? 'tar.gz' : distro
String packaging = distro
if (distro == 'tar') {
packaging = 'tar.gz'
} else if (distro == 'integ-test-zip') {
packaging = 'zip'
}
project.configurations {
elasticsearchDistro
}
@@ -105,6 +108,12 @@ class ClusterFormationTasks {
setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)

// install modules
for (Project module : node.config.modules) {
String actionName = pluginTaskName('install', module.name, 'Module')
setup = configureInstallModuleTask(taskName(task, node, actionName), project, setup, node, module)
}

// install plugins
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
@@ -132,8 +141,15 @@ class ClusterFormationTasks {
/** Adds a task to extract the elasticsearch distribution */
static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node) {
List extractDependsOn = [project.configurations.elasticsearchDistro, setup]
/* project.configurations.elasticsearchDistro.singleFile will be an
external artifact if this is being run by a plugin not living in the
elasticsearch source tree. If this is a plugin built in the
elasticsearch source tree or this is a distro in the elasticsearch
source tree then this should be the version of elasticsearch built
by the source tree. If it isn't then Bad Things(TM) will happen. */
Task extract
switch (node.config.distribution) {
case 'integ-test-zip':
case 'zip':
extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) {
from { project.zipTree(project.configurations.elasticsearchDistro.singleFile) }
@@ -148,6 +164,33 @@ class ClusterFormationTasks {
into node.baseDir
}
break;
case 'rpm':
File rpmDatabase = new File(node.baseDir, 'rpm-database')
File rpmExtracted = new File(node.baseDir, 'rpm-extracted')
/* Delay reading the location of the rpm file until task execution */
Object rpm = "${ -> project.configurations.elasticsearchDistro.singleFile}"
extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
commandLine 'rpm', '--badreloc', '--nodeps', '--noscripts', '--notriggers',
'--dbpath', rpmDatabase,
'--relocate', "/=${rpmExtracted}",
'-i', rpm
doFirst {
rpmDatabase.deleteDir()
rpmExtracted.deleteDir()
}
}
break;
case 'deb':
/* Delay reading the location of the deb file until task execution */
File debExtracted = new File(node.baseDir, 'deb-extracted')
Object deb = "${ -> project.configurations.elasticsearchDistro.singleFile}"
extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
commandLine 'dpkg-deb', '-x', deb, debExtracted
doFirst {
debExtracted.deleteDir()
}
}
break;
default:
throw new InvalidUserDataException("Unknown distribution: ${node.config.distribution}")
}
@@ -172,7 +215,7 @@ class ClusterFormationTasks {

Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
writeConfig.doFirst {
File configFile = new File(node.homeDir, 'config/elasticsearch.yml')
File configFile = new File(node.confDir, 'elasticsearch.yml')
logger.info("Configuring ${configFile}")
configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8')
}
@@ -185,7 +228,8 @@ class ClusterFormationTasks {
Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup)
copyConfig.into(new File(node.homeDir, 'config')) // copy must always have a general dest dir, even though we don't use it
for (Map.Entry<String,Object> extraConfigFile : node.config.extraConfigFiles.entrySet()) {
Closure delayedSrc = {
copyConfig.doFirst {
// make sure the copy won't be a no-op or act on a directory
File srcConfigFile = project.file(extraConfigFile.getValue())
if (srcConfigFile.isDirectory()) {
throw new GradleException("Source for extraConfigFile must be a file: ${srcConfigFile}")
@@ -193,11 +237,10 @@ class ClusterFormationTasks {
if (srcConfigFile.exists() == false) {
throw new GradleException("Source file for extraConfigFile does not exist: ${srcConfigFile}")
}
return srcConfigFile
}
File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
copyConfig.from(delayedSrc)
.into(destConfigFile.canonicalFile.parentFile)
copyConfig.into(destConfigFile.canonicalFile.parentFile)
.from({ extraConfigFile.getValue() }) // wrap in closure to delay resolution to execution time
.rename { destConfigFile.name }
}
return copyConfig
@@ -255,6 +298,20 @@ class ClusterFormationTasks {
return copyPlugins
}

static Task configureInstallModuleTask(String name, Project project, Task setup, NodeInfo node, Project module) {
if (node.config.distribution != 'integ-test-zip') {
throw new GradleException("Module ${module.path} is not allowed to be installed in distributions other than integ-test-zip because they should already have all modules bundled!")
}
if (module.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task ${name} cannot include module ${module.path} which is not an esplugin")
}
Copy installModule = project.tasks.create(name, Copy.class)
installModule.dependsOn(setup)
installModule.into(new File(node.homeDir, "modules/${module.name}"))
installModule.from({ project.zipTree(module.tasks.bundlePlugin.outputs.files.singleFile) })
return installModule
}

static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Object plugin) {
FileCollection pluginZip
if (plugin instanceof Project) {
@@ -284,18 +341,27 @@ class ClusterFormationTasks {

/** Adds a task to start an elasticsearch node with the given configuration */
static Task configureStartTask(String name, Project project, Task setup, NodeInfo node) {
String executable
List<String> esArgs = []
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
executable = 'cmd'
esArgs.add('/C')
esArgs.add('call')
} else {
executable = 'sh'
}

// this closure is converted into ant nodes by groovy's AntBuilder
Closure antRunner = { AntBuilder ant ->
ant.exec(executable: node.executable, spawn: node.config.daemonize, dir: node.cwd, taskname: 'elasticsearch') {
node.env.each { key, value -> env(key: key, value: value) }
node.args.each { arg(value: it) }
}
}

// this closure is the actual code to run elasticsearch
Closure elasticsearchRunner = {
// Due to how ant exec works with the spawn option, we lose all stdout/stderr from the
// process executed. To work around this, when spawning, we wrap the elasticsearch start
// command inside another shell script, which simply internally redirects the output
// of the real elasticsearch script. This allows ant to keep the streams open with the
// dummy process, but us to have the output available if there is an error in the
// elasticsearch start script
if (node.config.daemonize) {
node.writeWrapperScript()
}

// we must add debug options inside the closure so the config is read at execution time, as
// gradle task options are not processed until the end of the configuration phase
if (node.config.debug) {
@@ -303,37 +369,6 @@ class ClusterFormationTasks {
node.env['JAVA_OPTS'] = '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000'
}

// Due to how ant exec works with the spawn option, we lose all stdout/stderr from the
// process executed. To work around this, when spawning, we wrap the elasticsearch start
// command inside another shell script, which simply internally redirects the output
// of the real elasticsearch script. This allows ant to keep the streams open with the
// dummy process, but us to have the output available if there is an error in the
// elasticsearch start script
String script = node.esScript
if (node.config.daemonize) {
String scriptName = 'run'
String argsPasser = '"$@"'
String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
scriptName += '.bat'
argsPasser = '%*'
exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
}
File wrapperScript = new File(node.cwd, scriptName)
wrapperScript.setText("\"${script}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
script = wrapperScript.toString()
}

ant.exec(executable: executable, spawn: node.config.daemonize, dir: node.cwd, taskname: 'elasticsearch') {
node.env.each { key, value -> env(key: key, value: value) }
arg(value: script)
node.args.each { arg(value: it) }
}

}

// this closure is the actual code to run elasticsearch
Closure elasticsearchRunner = {
node.getCommandString().eachLine { line -> logger.info(line) }

if (logger.isInfoEnabled() || node.config.daemonize == false) {
@@ -405,14 +440,19 @@ class ClusterFormationTasks {
// We already log the command at info level. No need to do it twice.
node.getCommandString().eachLine { line -> logger.error(line) }
}
// the waitfor failed, so dump any output we got (may be empty if info logging, but that is ok)
logger.error("Node ${node.nodeNum} ant output:")
node.buffer.toString('UTF-8').eachLine { line -> logger.error(line) }
logger.error("Node ${node.nodeNum} output:")
logger.error("|-----------------------------------------")
logger.error("| failure marker exists: ${node.failedMarker.exists()}")
logger.error("| pid file exists: ${node.pidFile.exists()}")
// the waitfor failed, so dump any output we got (if info logging this goes directly to stdout)
logger.error("|\n| [ant output]")
node.buffer.toString('UTF-8').eachLine { line -> logger.error("| ${line}") }
// also dump the log file for the startup script (which will include ES logging output to stdout)
if (node.startLog.exists()) {
logger.error("Node ${node.nodeNum} log:")
node.startLog.eachLine { line -> logger.error(line) }
logger.error("|\n| [log]")
node.startLog.eachLine { line -> logger.error("| ${line}") }
}
logger.error("|-----------------------------------------")
}
throw new GradleException(msg)
}
@ -18,6 +18,7 @@
|
||||
*/
|
||||
package org.elasticsearch.gradle.test
|
||||
|
||||
import org.apache.tools.ant.taskdefs.condition.Os
|
||||
import org.elasticsearch.gradle.VersionProperties
|
||||
import org.gradle.api.InvalidUserDataException
|
||||
import org.gradle.api.Project
|
||||
@ -45,6 +46,12 @@ class NodeInfo {
|
||||
/** elasticsearch home dir */
|
||||
File homeDir
|
||||
|
||||
/** config directory */
|
||||
File confDir
|
||||
|
||||
/** THE config file */
|
||||
File configFile
|
||||
|
||||
/** working directory for the node process */
|
||||
File cwd
|
||||
|
||||
@ -63,8 +70,14 @@ class NodeInfo {
|
||||
/** arguments to start the node with */
|
||||
List<String> args
|
||||
|
||||
/** Executable to run the bin/elasticsearch with, either cmd or sh */
|
||||
String executable
|
||||
|
||||
/** Path to the elasticsearch start script */
|
||||
String esScript
|
||||
File esScript
|
||||
|
||||
/** script to run when running in the background */
|
||||
File wrapperScript
|
||||
|
||||
/** buffer for ant output when starting this node */
|
||||
ByteArrayOutputStream buffer = new ByteArrayOutputStream()
|
||||
@ -77,34 +90,75 @@ class NodeInfo {
|
||||
baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
|
||||
pidFile = new File(baseDir, 'es.pid')
|
||||
homeDir = homeDir(baseDir, config.distribution)
|
||||
confDir = confDir(baseDir, config.distribution)
|
||||
configFile = new File(confDir, 'elasticsearch.yml')
|
||||
cwd = new File(baseDir, "cwd")
|
||||
failedMarker = new File(cwd, 'run.failed')
|
||||
startLog = new File(cwd, 'run.log')
|
||||
pluginsTmpDir = new File(baseDir, "plugins tmp")
|
||||
|
||||
args = []
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
executable = 'cmd'
|
||||
args.add('/C')
|
||||
args.add('"') // quote the entire command
|
||||
wrapperScript = new File(cwd, "run.bat")
|
||||
esScript = new File(homeDir, 'bin/elasticsearch.bat')
|
||||
} else {
|
||||
executable = 'sh'
|
||||
wrapperScript = new File(cwd, "run")
|
||||
esScript = new File(homeDir, 'bin/elasticsearch')
|
||||
}
|
||||
if (config.daemonize) {
|
||||
args.add("${wrapperScript}")
|
||||
} else {
|
||||
args.add("${esScript}")
|
||||
}
|
||||
|
||||
env = [
|
||||
'JAVA_HOME' : project.javaHome,
|
||||
'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
|
||||
]
|
||||
args = config.systemProperties.collect { key, value -> "-D${key}=${value}" }
|
||||
args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
|
||||
for (Map.Entry<String, String> property : System.properties.entrySet()) {
|
||||
if (property.getKey().startsWith('es.')) {
|
||||
args.add("-D${property.getKey()}=${property.getValue()}")
|
||||
}
|
||||
}
|
||||
// running with cmd on windows will look for this with the .bat extension
|
||||
esScript = new File(homeDir, 'bin/elasticsearch').toString()
|
||||
args.add("-Des.path.conf=${confDir}")
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
args.add('"') // end the entire command, quoted
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns debug string for the command that started this node. */
|
||||
String getCommandString() {
|
||||
String esCommandString = "Elasticsearch node ${nodeNum} command: ${esScript} "
|
||||
esCommandString += args.join(' ')
|
||||
esCommandString += '\nenvironment:'
|
||||
env.each { k, v -> esCommandString += "\n ${k}: ${v}" }
|
||||
String esCommandString = "\nNode ${nodeNum} configuration:\n"
|
||||
esCommandString += "|-----------------------------------------\n"
|
||||
esCommandString += "| cwd: ${cwd}\n"
|
||||
esCommandString += "| command: ${executable} ${args.join(' ')}\n"
|
||||
esCommandString += '| environment:\n'
|
||||
env.each { k, v -> esCommandString += "| ${k}: ${v}\n" }
|
||||
if (config.daemonize) {
|
||||
esCommandString += "|\n| [${wrapperScript.name}]\n"
|
||||
wrapperScript.eachLine('UTF-8', { line -> esCommandString += " ${line}\n"})
|
||||
}
|
||||
esCommandString += '|\n| [elasticsearch.yml]\n'
|
||||
configFile.eachLine('UTF-8', { line -> esCommandString += "| ${line}\n" })
|
||||
esCommandString += "|-----------------------------------------"
|
||||
return esCommandString
|
||||
}
|
||||
|
||||
void writeWrapperScript() {
|
||||
String argsPasser = '"$@"'
|
||||
String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
argsPasser = '%*'
|
||||
exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
|
||||
}
|
||||
wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
|
||||
}
|
||||
|
||||
/** Returns the http port for this node */
|
||||
int httpPort() {
|
||||
return config.baseHttpPort + nodeNum
|
||||
@ -119,13 +173,32 @@ class NodeInfo {
|
||||
static File homeDir(File baseDir, String distro) {
|
||||
String path
|
||||
switch (distro) {
|
||||
case 'integ-test-zip':
|
||||
case 'zip':
|
||||
case 'tar':
|
||||
path = "elasticsearch-${VersionProperties.elasticsearch}"
|
||||
break;
|
||||
break
|
||||
case 'rpm':
|
||||
case 'deb':
|
||||
path = "${distro}-extracted/usr/share/elasticsearch"
|
||||
break
|
||||
default:
|
||||
throw new InvalidUserDataException("Unknown distribution: ${distro}")
|
||||
}
|
||||
return new File(baseDir, path)
|
||||
}
|
||||
|
||||
static File confDir(File baseDir, String distro) {
|
||||
switch (distro) {
|
||||
case 'integ-test-zip':
|
||||
case 'zip':
|
||||
case 'tar':
|
||||
return new File(homeDir(baseDir, distro), 'config')
|
||||
case 'rpm':
|
||||
case 'deb':
|
||||
return new File(baseDir, "${distro}-extracted/etc/elasticsearch")
|
||||
default:
|
||||
throw new InvalidUserDataException("Unkown distribution: ${distro}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -31,54 +31,38 @@ import org.gradle.util.ConfigureUtil
|
||||
* Runs integration tests, but first starts an ES cluster,
|
||||
* and passes the ES cluster info as parameters to the tests.
|
||||
*/
|
||||
class RestIntegTestTask extends RandomizedTestingTask {
|
||||
public class RestIntegTestTask extends RandomizedTestingTask {
|
||||
|
||||
ClusterConfiguration clusterConfig = new ClusterConfiguration()
|
||||
|
||||
/** Flag indicating whether the rest tests in the rest spec should be run. */
|
||||
@Input
|
||||
boolean includePackaged = false
|
||||
|
||||
static RestIntegTestTask configure(Project project) {
|
||||
Map integTestOptions = [
|
||||
name: 'integTest',
|
||||
type: RestIntegTestTask,
|
||||
dependsOn: 'testClasses',
|
||||
group: JavaBasePlugin.VERIFICATION_GROUP,
|
||||
description: 'Runs rest tests against an elasticsearch cluster.'
|
||||
]
|
||||
RestIntegTestTask integTest = project.tasks.create(integTestOptions)
|
||||
integTest.configure(BuildPlugin.commonTestConfig(project))
|
||||
integTest.configure {
|
||||
include '**/*IT.class'
|
||||
systemProperty 'tests.rest.load_packaged', 'false'
|
||||
}
|
||||
RandomizedTestingTask test = project.tasks.findByName('test')
|
||||
if (test != null) {
|
||||
integTest.classpath = test.classpath
|
||||
integTest.testClassesDir = test.testClassesDir
|
||||
integTest.mustRunAfter(test)
|
||||
}
|
||||
project.check.dependsOn(integTest)
|
||||
public RestIntegTestTask() {
|
||||
description = 'Runs rest tests against an elasticsearch cluster.'
|
||||
group = JavaBasePlugin.VERIFICATION_GROUP
|
||||
dependsOn(project.testClasses)
|
||||
classpath = project.sourceSets.test.runtimeClasspath
|
||||
testClassesDir = project.sourceSets.test.output.classesDir
|
||||
|
||||
// start with the common test configuration
|
||||
configure(BuildPlugin.commonTestConfig(project))
|
||||
// override/add more for rest tests
|
||||
parallelism = '1'
|
||||
include('**/*IT.class')
|
||||
systemProperty('tests.rest.load_packaged', 'false')
|
||||
|
||||
// copy the rest spec/tests into the test resources
|
||||
RestSpecHack.configureDependencies(project)
|
||||
project.afterEvaluate {
|
||||
integTest.dependsOn(RestSpecHack.configureTask(project, integTest.includePackaged))
|
||||
dependsOn(RestSpecHack.configureTask(project, includePackaged))
|
||||
systemProperty('tests.cluster', "localhost:${clusterConfig.baseTransportPort}")
|
||||
}
|
||||
return integTest
|
||||
}
|
||||
|
||||
RestIntegTestTask() {
|
||||
// this must run after all projects have been configured, so we know any project
|
||||
// references can be accessed as fully configured projects
|
||||
project.gradle.projectsEvaluated {
|
||||
Task test = project.tasks.findByName('test')
|
||||
if (test != null) {
|
||||
mustRunAfter(test)
|
||||
}
|
||||
ClusterFormationTasks.setup(project, this, clusterConfig)
|
||||
configure {
|
||||
parallelism '1'
|
||||
systemProperty 'tests.cluster', "localhost:${clusterConfig.baseTransportPort}"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -91,11 +75,11 @@ class RestIntegTestTask extends RandomizedTestingTask {
|
||||
}
|
||||
|
||||
@Input
|
||||
void cluster(Closure closure) {
|
||||
public void cluster(Closure closure) {
|
||||
ConfigureUtil.configure(closure, clusterConfig)
|
||||
}
|
||||
|
||||
ClusterConfiguration getCluster() {
|
||||
public ClusterConfiguration getCluster() {
|
||||
return clusterConfig
|
||||
}
|
||||
}
|
||||
|
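For orientation, a consuming build script can tune the cluster through the `cluster` closure declared above. This is a hedged sketch only: the property names (`includePackaged`, `distribution`, `baseHttpPort`, `baseTransportPort`) come from the surrounding code, while the override values are invented.

------------------------------------------------------------
integTest {
    includePackaged = false           // keep the spec-packaged tests off, matching the default above
    cluster {
        distribution = 'zip'          // the same value RestTestPlugin picks for qa projects
        baseHttpPort = 9400           // hypothetical override to avoid clashing with a local node
        baseTransportPort = 9500      // ends up in the 'tests.cluster' system property set above
    }
}
------------------------------------------------------------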
@ -28,12 +28,12 @@ import org.gradle.api.tasks.Copy
|
||||
* currently must be available on the local filesystem. This class encapsulates
|
||||
* setting up tasks to copy the rest spec api to test resources.
|
||||
*/
|
||||
class RestSpecHack {
|
||||
public class RestSpecHack {
|
||||
/**
|
||||
* Sets dependencies needed to copy the rest spec.
|
||||
* @param project The project to add rest spec dependency to
|
||||
*/
|
||||
static void configureDependencies(Project project) {
|
||||
public static void configureDependencies(Project project) {
|
||||
project.configurations {
|
||||
restSpec
|
||||
}
|
||||
@ -48,7 +48,7 @@ class RestSpecHack {
|
||||
* @param project The project to add the copy task to
|
||||
* @param includePackagedTests true if the packaged tests should be copied, false otherwise
|
||||
*/
|
||||
static Task configureTask(Project project, boolean includePackagedTests) {
|
||||
public static Task configureTask(Project project, boolean includePackagedTests) {
|
||||
Map copyRestSpecProps = [
|
||||
name : 'copyRestSpec',
|
||||
type : Copy,
|
||||
@ -65,7 +65,6 @@ class RestSpecHack {
|
||||
project.idea {
|
||||
module {
|
||||
if (scopes.TEST != null) {
|
||||
// TODO: need to add the TEST scope somehow for rest test plugin...
|
||||
scopes.TEST.plus.add(project.configurations.restSpec)
|
||||
}
|
||||
}
|
||||
|
@ -18,22 +18,19 @@
|
||||
*/
|
||||
package org.elasticsearch.gradle.test
|
||||
|
||||
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
|
||||
import org.gradle.api.Plugin
|
||||
import org.gradle.api.Project
|
||||
|
||||
/** Configures the build to have a rest integration test. */
|
||||
class RestTestPlugin implements Plugin<Project> {
|
||||
/** A plugin to add rest integration tests. Used for qa projects. */
|
||||
public class RestTestPlugin implements Plugin<Project> {
|
||||
|
||||
@Override
|
||||
void apply(Project project) {
|
||||
public void apply(Project project) {
|
||||
project.pluginManager.apply(StandaloneTestBasePlugin)
|
||||
|
||||
RandomizedTestingTask integTest = RestIntegTestTask.configure(project)
|
||||
RestSpecHack.configureDependencies(project)
|
||||
integTest.configure {
|
||||
classpath = project.sourceSets.test.runtimeClasspath
|
||||
testClassesDir project.sourceSets.test.output.classesDir
|
||||
}
|
||||
RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
|
||||
integTest.cluster.distribution = 'zip' // rest tests should run with the real zip
|
||||
integTest.mustRunAfter(project.precommit)
|
||||
project.check.dependsOn(integTest)
|
||||
}
|
||||
}
|
||||
|
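A qa project would then pick this up by applying the plugin and, where needed, flipping the `includePackaged` flag on the generated `integTest` task. The plugin id `elasticsearch.rest-test` is an assumption here; whatever id RestTestPlugin is registered under in the plugin descriptors is what actually applies.

------------------------------------------------------------
apply plugin: 'elasticsearch.rest-test'   // assumed id for RestTestPlugin

integTest {
    includePackaged = true                // also run the tests bundled with the rest spec
}
------------------------------------------------------------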
@ -2,13 +2,17 @@ package org.elasticsearch.gradle.test
|
||||
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.Task
|
||||
import org.gradle.api.internal.tasks.options.Option
|
||||
import org.gradle.util.ConfigureUtil
|
||||
|
||||
class RunTask extends DefaultTask {
|
||||
public class RunTask extends DefaultTask {
|
||||
|
||||
ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false)
|
||||
|
||||
RunTask() {
|
||||
public RunTask() {
|
||||
description = "Runs elasticsearch with '${project.path}'"
|
||||
group = 'Verification'
|
||||
project.afterEvaluate {
|
||||
ClusterFormationTasks.setup(project, this, clusterConfig)
|
||||
}
|
||||
@ -22,11 +26,10 @@ class RunTask extends DefaultTask {
|
||||
clusterConfig.debug = enabled;
|
||||
}
|
||||
|
||||
static void configure(Project project) {
|
||||
RunTask task = project.tasks.create(
|
||||
name: 'run',
|
||||
type: RunTask,
|
||||
description: "Runs elasticsearch with '${project.path}'",
|
||||
group: 'Verification')
|
||||
/** Configure the cluster that will be run. */
|
||||
@Override
|
||||
public Task configure(Closure closure) {
|
||||
ConfigureUtil.configure(closure, clusterConfig)
|
||||
return this
|
||||
}
|
||||
}
|
||||
|
@ -27,12 +27,13 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
import org.gradle.api.Plugin
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.plugins.JavaBasePlugin
|
||||
import org.gradle.plugins.ide.eclipse.model.EclipseClasspath
|
||||
|
||||
/** Configures the build to have a rest integration test. */
|
||||
class StandaloneTestBasePlugin implements Plugin<Project> {
|
||||
public class StandaloneTestBasePlugin implements Plugin<Project> {
|
||||
|
||||
@Override
|
||||
void apply(Project project) {
|
||||
public void apply(Project project) {
|
||||
project.pluginManager.apply(JavaBasePlugin)
|
||||
project.pluginManager.apply(RandomizedTestingPlugin)
|
||||
|
||||
@ -40,25 +41,15 @@ class StandaloneTestBasePlugin implements Plugin<Project> {
|
||||
BuildPlugin.configureRepositories(project)
|
||||
|
||||
// only setup tests to build
|
||||
project.sourceSets {
|
||||
test
|
||||
}
|
||||
project.dependencies {
|
||||
testCompile "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}"
|
||||
}
|
||||
project.sourceSets.create('test')
|
||||
project.dependencies.add('testCompile', "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}")
|
||||
|
||||
project.eclipse {
|
||||
classpath {
|
||||
sourceSets = [project.sourceSets.test]
|
||||
plusConfigurations = [project.configurations.testRuntime]
|
||||
}
|
||||
}
|
||||
project.idea {
|
||||
module {
|
||||
testSourceDirs += project.sourceSets.test.java.srcDirs
|
||||
scopes['TEST'] = [plus: [project.configurations.testRuntime]]
|
||||
}
|
||||
}
|
||||
PrecommitTasks.configure(project)
|
||||
project.eclipse.classpath.sourceSets = [project.sourceSets.test]
|
||||
project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]
|
||||
project.idea.module.testSourceDirs += project.sourceSets.test.java.srcDirs
|
||||
project.idea.module.scopes['TEST'] = [plus: [project.configurations.testRuntime]]
|
||||
|
||||
PrecommitTasks.create(project, false)
|
||||
project.check.dependsOn(project.precommit)
|
||||
}
|
||||
}
|
||||
|
@ -25,11 +25,11 @@ import org.gradle.api.Plugin
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.plugins.JavaBasePlugin
|
||||
|
||||
/** Configures the build to have only unit tests. */
|
||||
class StandaloneTestPlugin implements Plugin<Project> {
|
||||
/** A plugin to add tests only. Used for QA tests that run arbitrary unit tests. */
|
||||
public class StandaloneTestPlugin implements Plugin<Project> {
|
||||
|
||||
@Override
|
||||
void apply(Project project) {
|
||||
public void apply(Project project) {
|
||||
project.pluginManager.apply(StandaloneTestBasePlugin)
|
||||
|
||||
Map testOptions = [
|
||||
@ -41,10 +41,9 @@ class StandaloneTestPlugin implements Plugin<Project> {
|
||||
]
|
||||
RandomizedTestingTask test = project.tasks.create(testOptions)
|
||||
test.configure(BuildPlugin.commonTestConfig(project))
|
||||
test.configure {
|
||||
classpath = project.sourceSets.test.runtimeClasspath
|
||||
testClassesDir project.sourceSets.test.output.classesDir
|
||||
}
|
||||
test.classpath = project.sourceSets.test.runtimeClasspath
|
||||
test.testClassesDir project.sourceSets.test.output.classesDir
|
||||
test.mustRunAfter(project.precommit)
|
||||
project.check.dependsOn(test)
|
||||
}
|
||||
}
|
||||
|
@ -112,3 +112,7 @@ java.lang.System#setProperty(java.lang.String,java.lang.String)
|
||||
java.lang.System#clearProperty(java.lang.String)
|
||||
java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a read-only view
|
||||
|
||||
@defaultMessage Avoid unchecked warnings by using Collections#empty(List|Map|Set) methods
|
||||
java.util.Collections#EMPTY_LIST
|
||||
java.util.Collections#EMPTY_MAP
|
||||
java.util.Collections#EMPTY_SET
|
||||
|
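The reasoning behind this rule, as a minimal sketch: the raw constants are untyped, so assigning them to a parameterized collection produces unchecked warnings, while the factory methods infer the element type and return the same shared immutable instances.

------------------------------------------------------------
List<String> bad  = Collections.EMPTY_LIST            // forbidden: raw type, unchecked warning
List<String> good = Collections.<String> emptyList()  // preferred: typed, still immutable and shared
------------------------------------------------------------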
@ -90,3 +90,12 @@ org.elasticsearch.common.io.PathUtils#get(java.net.URI)
|
||||
|
||||
@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
|
||||
org.apache.lucene.search.Query#setBoost(float)
|
||||
|
||||
@defaultMessage Constructing a DateTime without a time zone is dangerous
|
||||
org.joda.time.DateTime#<init>()
|
||||
org.joda.time.DateTime#<init>(long)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int, int)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
|
||||
org.joda.time.DateTime#now()
|
||||
org.joda.time.DateTimeZone#getDefault()
|
||||
|
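To make the signatures above concrete, a short sketch of the forbidden and preferred forms of the Joda-Time calls:

------------------------------------------------------------
import org.joda.time.DateTime
import org.joda.time.DateTimeZone

DateTime implicit = new DateTime()                  // forbidden: depends on the JVM default zone
DateTime explicit = new DateTime(DateTimeZone.UTC)  // preferred: the zone is spelled out
DateTime nowUtc   = DateTime.now(DateTimeZone.UTC)  // preferred replacement for DateTime.now()
------------------------------------------------------------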
@ -1,66 +0,0 @@
|
||||
# Licensed to Elasticsearch under one or more contributor
|
||||
# license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright
|
||||
# ownership. Elasticsearch licenses this file to you under
|
||||
# the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on
|
||||
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
# either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
@defaultMessage unsafe encoders/decoders have problems in the lzf compress library. Use variants of encode/decode functions which take Encoder/Decoder.
|
||||
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int)
|
||||
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int)
|
||||
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.impl.UnsafeChunkDecoder#<init>()
|
||||
com.ning.compress.lzf.parallel.CompressTask
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance()
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int)
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int)
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.util.ChunkDecoderFactory#optimalInstance()
|
||||
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.File)
|
||||
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.FileDescriptor)
|
||||
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.lang.String)
|
||||
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File)
|
||||
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File, boolean)
|
||||
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.FileDescriptor)
|
||||
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String)
|
||||
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String, boolean)
|
||||
com.ning.compress.lzf.LZFEncoder#encode(byte[])
|
||||
com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int)
|
||||
com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int)
|
||||
com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.LZFCompressingInputStream#<init>(java.io.InputStream)
|
||||
com.ning.compress.lzf.LZFDecoder#fastDecoder()
|
||||
com.ning.compress.lzf.LZFDecoder#decode(byte[])
|
||||
com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int)
|
||||
com.ning.compress.lzf.LZFDecoder#decode(byte[], byte[])
|
||||
com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int, byte[])
|
||||
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream)
|
||||
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, boolean)
|
||||
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler, boolean)
|
||||
com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream)
|
||||
com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler)
|
||||
com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler, com.ning.compress.BufferRecycler)
|
||||
|
||||
@defaultMessage Constructing a DateTime without a time zone is dangerous
|
||||
org.joda.time.DateTime#<init>()
|
||||
org.joda.time.DateTime#<init>(long)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int, int)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
|
||||
org.joda.time.DateTime#now()
|
||||
org.joda.time.DateTimeZone#getDefault()
|
@ -62,12 +62,9 @@ dependencies {
|
||||
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}"
|
||||
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
|
||||
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
|
||||
compile "org.yaml:snakeyaml:1.15" // used by jackson yaml
|
||||
|
||||
// network stack
|
||||
compile 'io.netty:netty:3.10.5.Final'
|
||||
// compression of transport protocol
|
||||
compile 'com.ning:compress-lzf:1.0.2'
|
||||
// percentiles aggregation
|
||||
compile 'com.tdunning:t-digest:3.0'
|
||||
// percentile ranks aggregation
|
||||
@ -117,6 +114,9 @@ forbiddenPatterns {
|
||||
exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
|
||||
}
|
||||
|
||||
// dependency license are currently checked in distribution
|
||||
dependencyLicenses.enabled = false
|
||||
|
||||
if (isEclipse == false || project.path == ":core-tests") {
|
||||
task integTest(type: RandomizedTestingTask,
|
||||
group: JavaBasePlugin.VERIFICATION_GROUP,
|
||||
@ -129,8 +129,4 @@ if (isEclipse == false || project.path == ":core-tests") {
|
||||
}
|
||||
check.dependsOn integTest
|
||||
integTest.mustRunAfter test
|
||||
|
||||
RestSpecHack.configureDependencies(project)
|
||||
Task copyRestSpec = RestSpecHack.configureTask(project, true)
|
||||
integTest.dependsOn copyRestSpec
|
||||
}
|
||||
|
@ -554,7 +554,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
||||
NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class, org.elasticsearch.transport.NodeDisconnectedException::new, 84),
|
||||
ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class, org.elasticsearch.index.AlreadyExpiredException::new, 85),
|
||||
AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class, org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
|
||||
MERGE_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.MergeMappingException.class, org.elasticsearch.index.mapper.MergeMappingException::new, 87),
|
||||
// 87 used to be for MergeMappingException
|
||||
INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
|
||||
PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89),
|
||||
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
|
||||
|
@ -74,7 +74,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
|
||||
protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
|
||||
if (request.waitForEvents() != null) {
|
||||
final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis();
|
||||
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
return currentState;
|
||||
|
@ -72,14 +72,14 @@ public class NodeInfo extends BaseNodeResponse {
|
||||
private HttpInfo http;
|
||||
|
||||
@Nullable
|
||||
private PluginsInfo plugins;
|
||||
private PluginsAndModules plugins;
|
||||
|
||||
NodeInfo() {
|
||||
}
|
||||
|
||||
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
|
||||
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
|
||||
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsInfo plugins) {
|
||||
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins) {
|
||||
super(node);
|
||||
this.version = version;
|
||||
this.build = build;
|
||||
@ -172,7 +172,7 @@ public class NodeInfo extends BaseNodeResponse {
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public PluginsInfo getPlugins() {
|
||||
public PluginsAndModules getPlugins() {
|
||||
return this.plugins;
|
||||
}
|
||||
|
||||
@ -217,7 +217,8 @@ public class NodeInfo extends BaseNodeResponse {
|
||||
http = HttpInfo.readHttpInfo(in);
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
plugins = PluginsInfo.readPluginsInfo(in);
|
||||
plugins = new PluginsAndModules();
|
||||
plugins.readFrom(in);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -0,0 +1,115 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.node.info;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.plugins.PluginInfo;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Information about plugins and modules
|
||||
*/
|
||||
public class PluginsAndModules implements Streamable, ToXContent {
|
||||
private List<PluginInfo> plugins;
|
||||
private List<PluginInfo> modules;
|
||||
|
||||
public PluginsAndModules() {
|
||||
plugins = new ArrayList<>();
|
||||
modules = new ArrayList<>();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the list of plugins, ordered by plugin name
|
||||
*/
|
||||
public List<PluginInfo> getPluginInfos() {
|
||||
List<PluginInfo> plugins = new ArrayList<>(this.plugins);
|
||||
Collections.sort(plugins, (p1, p2) -> p1.getName().compareTo(p2.getName()));
|
||||
return plugins;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the list of modules, ordered by module name
|
||||
*/
|
||||
public List<PluginInfo> getModuleInfos() {
|
||||
List<PluginInfo> modules = new ArrayList<>(this.modules);
|
||||
Collections.sort(modules, (p1, p2) -> p1.getName().compareTo(p2.getName()));
|
||||
return modules;
|
||||
}
|
||||
|
||||
public void addPlugin(PluginInfo info) {
|
||||
plugins.add(info);
|
||||
}
|
||||
|
||||
public void addModule(PluginInfo info) {
|
||||
modules.add(info);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
if (plugins.isEmpty() == false || modules.isEmpty() == false) {
|
||||
throw new IllegalStateException("instance is already populated");
|
||||
}
|
||||
int plugins_size = in.readInt();
|
||||
for (int i = 0; i < plugins_size; i++) {
|
||||
plugins.add(PluginInfo.readFromStream(in));
|
||||
}
|
||||
int modules_size = in.readInt();
|
||||
for (int i = 0; i < modules_size; i++) {
|
||||
modules.add(PluginInfo.readFromStream(in));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeInt(plugins.size());
|
||||
for (PluginInfo plugin : getPluginInfos()) {
|
||||
plugin.writeTo(out);
|
||||
}
|
||||
out.writeInt(modules.size());
|
||||
for (PluginInfo module : getModuleInfos()) {
|
||||
module.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray("plugins");
|
||||
for (PluginInfo pluginInfo : getPluginInfos()) {
|
||||
pluginInfo.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
// TODO: not ideal, make a better api for this (e.g. with jar metadata, and so on)
|
||||
builder.startArray("modules");
|
||||
for (PluginInfo moduleInfo : getModuleInfos()) {
|
||||
moduleInfo.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
|
||||
return builder;
|
||||
}
|
||||
}
|
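A rough usage sketch for the new class; the `PluginInfo` instances are assumed to come from wherever plugins and modules are discovered, and the variable names below are invented:

------------------------------------------------------------
import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules
import org.elasticsearch.plugins.PluginInfo

PluginsAndModules info = new PluginsAndModules()
info.addPlugin(analysisIcuInfo)   // hypothetical PluginInfo for an installed plugin
info.addModule(langGroovyInfo)    // hypothetical PluginInfo for a bundled module
// the getters return copies sorted by name, so serialization order is stable
for (PluginInfo p : info.getPluginInfos()) {
    println p.getName()
}
------------------------------------------------------------

Note that readFrom() refuses to populate an instance twice, so a fresh object is needed per deserialization, as the NodeInfo change above does.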
@ -1,101 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.node.info;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilderString;
|
||||
import org.elasticsearch.plugins.PluginInfo;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
|
||||
public class PluginsInfo implements Streamable, ToXContent {
|
||||
static final class Fields {
|
||||
static final XContentBuilderString PLUGINS = new XContentBuilderString("plugins");
|
||||
}
|
||||
|
||||
private List<PluginInfo> infos;
|
||||
|
||||
public PluginsInfo() {
|
||||
infos = new ArrayList<>();
|
||||
}
|
||||
|
||||
public PluginsInfo(int size) {
|
||||
infos = new ArrayList<>(size);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return an ordered list based on plugins name
|
||||
*/
|
||||
public List<PluginInfo> getInfos() {
|
||||
Collections.sort(infos, new Comparator<PluginInfo>() {
|
||||
@Override
|
||||
public int compare(final PluginInfo o1, final PluginInfo o2) {
|
||||
return o1.getName().compareTo(o2.getName());
|
||||
}
|
||||
});
|
||||
|
||||
return infos;
|
||||
}
|
||||
|
||||
public void add(PluginInfo info) {
|
||||
infos.add(info);
|
||||
}
|
||||
|
||||
public static PluginsInfo readPluginsInfo(StreamInput in) throws IOException {
|
||||
PluginsInfo infos = new PluginsInfo();
|
||||
infos.readFrom(in);
|
||||
return infos;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
int plugins_size = in.readInt();
|
||||
for (int i = 0; i < plugins_size; i++) {
|
||||
infos.add(PluginInfo.readFromStream(in));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeInt(infos.size());
|
||||
for (PluginInfo plugin : getInfos()) {
|
||||
plugin.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray(Fields.PLUGINS);
|
||||
for (PluginInfo pluginInfo : getInfos()) {
|
||||
pluginInfo.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
|
||||
return builder;
|
||||
}
|
||||
}
|
@ -68,7 +68,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
|
||||
|
||||
@Override
|
||||
protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) {
|
||||
clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterRerouteResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("cluster_reroute (api)", new AckedClusterStateUpdateTask<ClusterRerouteResponse>(Priority.IMMEDIATE, request, listener) {
|
||||
|
||||
private volatile ClusterState clusterStateToSend;
|
||||
private volatile RoutingExplanations explanations;
|
||||
|
@ -91,7 +91,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
|
||||
final Settings.Builder transientUpdates = Settings.settingsBuilder();
|
||||
final Settings.Builder persistentUpdates = Settings.settingsBuilder();
|
||||
|
||||
clusterService.submitStateUpdateTask("cluster_update_settings", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("cluster_update_settings",
|
||||
new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.IMMEDIATE, request, listener) {
|
||||
|
||||
private volatile boolean changed = false;
|
||||
|
||||
@ -132,7 +133,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
|
||||
// in the components (e.g. FilterAllocationDecider), so the changes made by the first call aren't visible
|
||||
// to the components until the ClusterStateListener instances have been invoked, but are visible after
|
||||
// the first update task has been completed.
|
||||
clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings",
|
||||
new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.URGENT, request, listener) {
|
||||
|
||||
@Override
|
||||
public boolean mustAck(DiscoveryNode discoveryNode) {
|
||||
|
@ -74,7 +74,7 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
|
||||
versions.add(nodeResponse.nodeInfo().getVersion());
|
||||
process.addNodeStats(nodeResponse.nodeStats());
|
||||
jvm.addNodeInfoStats(nodeResponse.nodeInfo(), nodeResponse.nodeStats());
|
||||
plugins.addAll(nodeResponse.nodeInfo().getPlugins().getInfos());
|
||||
plugins.addAll(nodeResponse.nodeInfo().getPlugins().getPluginInfos());
|
||||
|
||||
// now do the stats that should be deduped by hardware (implemented by ip deduping)
|
||||
TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress();
|
||||
|
@ -46,9 +46,10 @@ import java.util.List;
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
/**
|
||||
* A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes
|
||||
* it in a single batch.
|
||||
* A bulk request holds an ordered list of {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s
|
||||
* and allows executing them in a single batch.
|
||||
*
|
||||
* Note that we only support refresh on the bulk request not per item.
|
||||
* @see org.elasticsearch.client.Client#bulk(BulkRequest)
|
||||
*/
|
||||
public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest {
|
||||
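As a hedged illustration of the note above (index names and sources are invented), refresh is set once on the whole bulk request rather than per item; the tightened validate() further down rejects per-item refresh:

------------------------------------------------------------
import org.elasticsearch.action.bulk.BulkRequest
import org.elasticsearch.action.delete.DeleteRequest
import org.elasticsearch.action.index.IndexRequest
import org.elasticsearch.action.update.UpdateRequest

BulkRequest bulk = new BulkRequest()
bulk.add(new IndexRequest("index", "type", "1").source('{"field": 1}'))
bulk.add(new UpdateRequest("index", "type", "2").doc('{"field": 2}'))
bulk.add(new DeleteRequest("index", "type", "3"))
bulk.refresh(true)   // setting refresh on an individual item would now fail validation
------------------------------------------------------------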
@ -89,6 +90,12 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
||||
return add(request, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a request to the current BulkRequest.
|
||||
* @param request Request to add
|
||||
* @param payload Optional payload
|
||||
* @return the current bulk request
|
||||
*/
|
||||
public BulkRequest add(ActionRequest request, @Nullable Object payload) {
|
||||
if (request instanceof IndexRequest) {
|
||||
add((IndexRequest) request, payload);
|
||||
@ -127,7 +134,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
||||
BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) {
|
||||
requests.add(request);
|
||||
addPayload(payload);
|
||||
sizeInBytes += request.source().length() + REQUEST_OVERHEAD;
|
||||
// lack of source is validated in validate() method
|
||||
sizeInBytes += (request.source() != null ? request.source().length() : 0) + REQUEST_OVERHEAD;
|
||||
return this;
|
||||
}
|
||||
|
||||
@ -292,7 +300,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
||||
String parent = null;
|
||||
String[] fields = defaultFields;
|
||||
String timestamp = null;
|
||||
Long ttl = null;
|
||||
TimeValue ttl = null;
|
||||
String opType = null;
|
||||
long version = Versions.MATCH_ANY;
|
||||
VersionType versionType = VersionType.INTERNAL;
|
||||
@ -325,9 +333,9 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
||||
timestamp = parser.text();
|
||||
} else if ("_ttl".equals(currentFieldName) || "ttl".equals(currentFieldName)) {
|
||||
if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
|
||||
ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName).millis();
|
||||
ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName);
|
||||
} else {
|
||||
ttl = parser.longValue();
|
||||
ttl = new TimeValue(parser.longValue());
|
||||
}
|
||||
} else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
|
||||
opType = parser.text();
|
||||
@ -478,8 +486,14 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
||||
if (requests.isEmpty()) {
|
||||
validationException = addValidationError("no requests added", validationException);
|
||||
}
|
||||
for (int i = 0; i < requests.size(); i++) {
|
||||
ActionRequestValidationException ex = requests.get(i).validate();
|
||||
for (ActionRequest request : requests) {
|
||||
// We first check if refresh has been set
|
||||
if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) ||
|
||||
(request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) ||
|
||||
(request instanceof IndexRequest && ((IndexRequest)request).refresh())) {
|
||||
validationException = addValidationError("Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", validationException);
|
||||
}
|
||||
ActionRequestValidationException ex = request.validate();
|
||||
if (ex != null) {
|
||||
if (validationException == null) {
|
||||
validationException = new ActionRequestValidationException();
|
||||
|
@ -335,7 +335,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
|
||||
indexRequest.process(clusterState.metaData(), mappingMd, allowIdGeneration, request.index());
|
||||
}
|
||||
|
||||
return executeIndexRequestOnPrimary(request, indexRequest, indexShard);
|
||||
return executeIndexRequestOnPrimary(indexRequest, indexShard);
|
||||
}
|
||||
|
||||
private WriteResult<DeleteResponse> shardDeleteOperation(BulkShardRequest request, DeleteRequest deleteRequest, IndexShard indexShard) {
|
||||
|
@ -35,6 +35,7 @@ import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.*;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
@ -136,7 +137,8 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
||||
private String parent;
|
||||
@Nullable
|
||||
private String timestamp;
|
||||
private long ttl = -1;
|
||||
@Nullable
|
||||
private TimeValue ttl;
|
||||
|
||||
private BytesReference source;
|
||||
|
||||
@ -229,6 +231,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
||||
if (!versionType.validateVersionForWrites(version)) {
|
||||
validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
|
||||
}
|
||||
|
||||
if (ttl != null) {
|
||||
if (ttl.millis() < 0) {
|
||||
validationException = addValidationError("ttl must not be negative", validationException);
|
||||
}
|
||||
}
|
||||
return validationException;
|
||||
}
|
||||
|
||||
@ -324,22 +332,33 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the relative ttl value. It musts be > 0 as it makes little sense otherwise. Setting it
|
||||
* to <tt>null</tt> will reset to have no ttl.
|
||||
* Sets the ttl value as a time value expression.
|
||||
*/
|
||||
public IndexRequest ttl(Long ttl) throws ElasticsearchGenerationException {
|
||||
if (ttl == null) {
|
||||
this.ttl = -1;
|
||||
return this;
|
||||
}
|
||||
if (ttl <= 0) {
|
||||
throw new IllegalArgumentException("TTL value must be > 0. Illegal value provided [" + ttl + "]");
|
||||
}
|
||||
public IndexRequest ttl(String ttl) {
|
||||
this.ttl = TimeValue.parseTimeValue(ttl, null, "ttl");
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the ttl as a {@link TimeValue} instance.
|
||||
*/
|
||||
public IndexRequest ttl(TimeValue ttl) {
|
||||
this.ttl = ttl;
|
||||
return this;
|
||||
}
|
||||
|
||||
public long ttl() {
|
||||
/**
|
||||
* Sets the relative ttl value in milliseconds. It must be greater than 0 as it makes little sense otherwise.
|
||||
*/
|
||||
public IndexRequest ttl(long ttl) {
|
||||
this.ttl = new TimeValue(ttl);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the ttl as a {@link TimeValue}
|
||||
*/
|
||||
public TimeValue ttl() {
|
||||
return this.ttl;
|
||||
}
|
||||
|
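The three setters accept equivalent inputs, as this short sketch shows (index name and source are invented):

------------------------------------------------------------
import org.elasticsearch.action.index.IndexRequest
import org.elasticsearch.common.unit.TimeValue

IndexRequest req = new IndexRequest("index", "type", "1").source('{"user": "kimchy"}')
req.ttl("1h")                         // time value expression
req.ttl(TimeValue.timeValueHours(1))  // TimeValue instance
req.ttl(3600 * 1000L)                 // relative milliseconds, must be positive
------------------------------------------------------------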
||||
@ -665,7 +684,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
||||
routing = in.readOptionalString();
|
||||
parent = in.readOptionalString();
|
||||
timestamp = in.readOptionalString();
|
||||
ttl = in.readLong();
|
||||
ttl = in.readBoolean() ? TimeValue.readTimeValue(in) : null;
|
||||
source = in.readBytesReference();
|
||||
|
||||
opType = OpType.fromId(in.readByte());
|
||||
@ -682,7 +701,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
||||
out.writeOptionalString(routing);
|
||||
out.writeOptionalString(parent);
|
||||
out.writeOptionalString(timestamp);
|
||||
out.writeLong(ttl);
|
||||
if (ttl == null) {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
ttl.writeTo(out);
|
||||
}
|
||||
out.writeBytesReference(source);
|
||||
out.writeByte(opType.id());
|
||||
out.writeBoolean(refresh);
|
||||
|
@ -23,6 +23,7 @@ import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
@ -254,9 +255,27 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
|
||||
return this;
|
||||
}
|
||||
|
||||
// Sets the relative ttl value. It musts be > 0 as it makes little sense otherwise.
|
||||
/**
|
||||
* Sets the ttl value as a time value expression.
|
||||
*/
|
||||
public IndexRequestBuilder setTTL(String ttl) {
|
||||
request.ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the relative ttl value in milliseconds. It must be greater than 0 as it makes little sense otherwise.
|
||||
*/
|
||||
public IndexRequestBuilder setTTL(long ttl) {
|
||||
request.ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the ttl as a {@link TimeValue} instance.
|
||||
*/
|
||||
public IndexRequestBuilder setTTL(TimeValue ttl) {
|
||||
request.ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
@ -166,7 +166,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
|
||||
IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
|
||||
IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());
|
||||
|
||||
final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard);
|
||||
final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(request, indexShard);
|
||||
|
||||
final IndexResponse response = result.response;
|
||||
final Translog.Location location = result.location;
|
||||
|
@ -223,7 +223,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
|
||||
throw requestBlockException;
|
||||
}
|
||||
|
||||
logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version());
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version());
|
||||
}
|
||||
ShardsIterator shardIt = shards(clusterState, request, concreteIndices);
|
||||
nodeIds = new HashMap<>();
|
||||
|
||||
@ -300,7 +302,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
|
||||
}
|
||||
|
||||
protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) {
|
||||
logger.trace("received response for [{}] from node [{}]", actionName, node.id());
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("received response for [{}] from node [{}]", actionName, node.id());
|
||||
}
|
||||
|
||||
// this is defensive to protect against the possibility of double invocation
|
||||
// the current implementation of TransportService#sendRequest guards against this
|
||||
@ -351,7 +355,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
|
||||
public void messageReceived(final NodeRequest request, TransportChannel channel) throws Exception {
|
||||
List<ShardRouting> shards = request.getShards();
|
||||
final int totalShards = shards.size();
|
||||
logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards);
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards);
|
||||
}
|
||||
final Object[] shardResultOrExceptions = new Object[totalShards];
|
||||
|
||||
int shardIndex = -1;
|
||||
@ -375,10 +381,14 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
|
||||
|
||||
private void onShardOperation(final NodeRequest request, final Object[] shardResults, final int shardIndex, final ShardRouting shardRouting) {
|
||||
try {
|
||||
logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary());
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary());
|
||||
}
|
||||
ShardOperationResult result = shardOperation(request.indicesLevelRequest, shardRouting);
|
||||
shardResults[shardIndex] = result;
|
||||
logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary());
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary());
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
BroadcastShardOperationFailedException e = new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", t);
|
||||
e.setIndex(shardRouting.getIndex());
|
||||
|
@ -25,7 +25,6 @@ import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionWriteResponse;
|
||||
import org.elasticsearch.action.UnavailableShardsException;
|
||||
import org.elasticsearch.action.WriteConsistencyLevel;
|
||||
import org.elasticsearch.action.bulk.BulkShardRequest;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.index.IndexRequest.OpType;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
@ -1074,23 +1073,22 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
||||
|
||||
/** Utility method to create either an index or a create operation depending
|
||||
* on the {@link OpType} of the request. */
|
||||
private final Engine.Index prepareIndexOperationOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) {
|
||||
private Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) {
|
||||
SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).index(request.index()).type(request.type()).id(request.id())
|
||||
.routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
|
||||
return indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
|
||||
|
||||
}
|
||||
|
||||
/** Execute the given {@link IndexRequest} on a primary shard, throwing a
|
||||
* {@link RetryOnPrimaryException} if the operation needs to be re-tried. */
|
||||
protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable {
|
||||
Engine.Index operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
|
||||
protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard) throws Throwable {
|
||||
Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
|
||||
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
|
||||
final ShardId shardId = indexShard.shardId();
|
||||
if (update != null) {
|
||||
final String indexName = shardId.getIndex();
|
||||
mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
|
||||
operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
|
||||
operation = prepareIndexOperationOnPrimary(request, indexShard);
|
||||
update = operation.parsedDoc().dynamicMappingsUpdate();
|
||||
if (update != null) {
|
||||
throw new RetryOnPrimaryException(shardId,
|
||||
|
@ -88,7 +88,7 @@ public class UpdateHelper extends AbstractComponent {
|
||||
throw new DocumentMissingException(new ShardId(request.index(), request.shardId()), request.type(), request.id());
|
||||
}
|
||||
IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest();
|
||||
Long ttl = indexRequest.ttl();
|
||||
TimeValue ttl = indexRequest.ttl();
|
||||
if (request.scriptedUpsert() && request.script() != null) {
|
||||
// Run the script to perform the create logic
|
||||
IndexRequest upsert = request.upsertRequest();
|
||||
@ -99,7 +99,7 @@ public class UpdateHelper extends AbstractComponent {
|
||||
ctx.put("_source", upsertDoc);
|
||||
ctx = executeScript(request, ctx);
|
||||
//Allow the script to set TTL using ctx._ttl
|
||||
if (ttl < 0) {
|
||||
if (ttl == null) {
|
||||
ttl = getTTLFromScriptContext(ctx);
|
||||
}
|
||||
|
||||
@ -124,7 +124,7 @@ public class UpdateHelper extends AbstractComponent {
|
||||
indexRequest.index(request.index()).type(request.type()).id(request.id())
|
||||
// it has to be a "create!"
|
||||
.create(true)
|
||||
.ttl(ttl == null || ttl < 0 ? null : ttl)
|
||||
.ttl(ttl)
|
||||
.refresh(request.refresh())
|
||||
.routing(request.routing())
|
||||
.parent(request.parent())
|
||||
@ -151,7 +151,7 @@ public class UpdateHelper extends AbstractComponent {
|
||||
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
|
||||
String operation = null;
|
||||
String timestamp = null;
|
||||
Long ttl = null;
|
||||
TimeValue ttl = null;
|
||||
final Map<String, Object> updatedSourceAsMap;
|
||||
final XContentType updateSourceContentType = sourceAndContent.v1();
|
||||
String routing = getResult.getFields().containsKey(RoutingFieldMapper.NAME) ? getResult.field(RoutingFieldMapper.NAME).getValue().toString() : null;
|
||||
@ -160,7 +160,7 @@ public class UpdateHelper extends AbstractComponent {
|
||||
if (request.script() == null && request.doc() != null) {
|
||||
IndexRequest indexRequest = request.doc();
|
||||
updatedSourceAsMap = sourceAndContent.v2();
|
||||
if (indexRequest.ttl() > 0) {
|
||||
if (indexRequest.ttl() != null) {
|
||||
ttl = indexRequest.ttl();
|
||||
}
|
||||
timestamp = indexRequest.timestamp();
|
||||
@ -211,9 +211,9 @@ public class UpdateHelper extends AbstractComponent {
|
||||
// apply script to update the source
|
||||
// No TTL has been given in the update script so we keep previous TTL value if there is one
|
||||
if (ttl == null) {
|
||||
ttl = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
|
||||
if (ttl != null) {
|
||||
ttl = ttl - TimeValue.nsecToMSec(System.nanoTime() - getDateNS); // It is an approximation of exact TTL value, could be improved
|
||||
Long ttlAsLong = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
|
||||
if (ttlAsLong != null) {
|
||||
ttl = new TimeValue(ttlAsLong - TimeValue.nsecToMSec(System.nanoTime() - getDateNS));// It is an approximation of exact TTL value, could be improved
|
||||
}
|
||||
}
|
||||
|
||||
@ -256,17 +256,15 @@ public class UpdateHelper extends AbstractComponent {
|
||||
return ctx;
|
||||
}
|
||||
|
||||
private Long getTTLFromScriptContext(Map<String, Object> ctx) {
|
||||
Long ttl = null;
|
||||
private TimeValue getTTLFromScriptContext(Map<String, Object> ctx) {
|
||||
Object fetchedTTL = ctx.get("_ttl");
|
||||
if (fetchedTTL != null) {
|
||||
if (fetchedTTL instanceof Number) {
|
||||
ttl = ((Number) fetchedTTL).longValue();
|
||||
} else {
|
||||
ttl = TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl").millis();
|
||||
return new TimeValue(((Number) fetchedTTL).longValue());
|
||||
}
|
||||
return TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl");
|
||||
}
|
||||
return ttl;
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -337,13 +335,10 @@ public class UpdateHelper extends AbstractComponent {
|
||||
}
|
||||
}
|
||||
|
||||
public static enum Operation {
|
||||
|
||||
public enum Operation {
|
||||
UPSERT,
|
||||
INDEX,
|
||||
DELETE,
|
||||
NONE
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
@ -325,7 +326,7 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the new ttl of the document. Note that if detectNoop is true (the default)
|
||||
* Set the new ttl of the document as a long. Note that if detectNoop is true (the default)
|
||||
* and the source of the document isn't changed then the ttl update won't take
|
||||
* effect.
|
||||
*/
|
||||
@ -333,4 +334,24 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
|
||||
request.doc().ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the new ttl of the document as a time value expression. Note that if detectNoop is true (the default)
|
||||
* and the source of the document isn't changed then the ttl update won't take
|
||||
* effect.
|
||||
*/
|
||||
public UpdateRequestBuilder setTtl(String ttl) {
|
||||
request.doc().ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the new ttl of the document as a {@link TimeValue} instance. Note that if detectNoop is true (the default)
|
||||
* and the source of the document isn't changed then the ttl update won't take
|
||||
* effect.
|
||||
*/
|
||||
public UpdateRequestBuilder setTtl(TimeValue ttl) {
|
||||
request.doc().ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
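A usage sketch for the new overloads; `client` stands for any connected `Client` and the index, id, and values are invented. Because detectNoop defaults to true, the doc must actually change for the ttl update to take effect:

------------------------------------------------------------
client.prepareUpdate("index", "type", "1")
        .setDoc('{"counter": 2}')   // must modify the source, or the ttl update is dropped as a noop
        .setTtl("30m")              // equivalently setTtl(TimeValue.timeValueMinutes(30)) or setTtl(1800000L)
        .get()
------------------------------------------------------------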
@ -217,4 +217,88 @@ final class JNAKernel32Library {
|
||||
* @return true if the function succeeds.
|
||||
*/
|
||||
native boolean CloseHandle(Pointer handle);
|
||||
|
||||
/**
|
||||
* Creates or opens a new job object
|
||||
*
|
||||
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx
|
||||
*
|
||||
* @param jobAttributes security attributes
|
||||
* @param name job name
|
||||
* @return job handle if the function succeeds
|
||||
*/
|
||||
native Pointer CreateJobObjectW(Pointer jobAttributes, String name);
|
||||
|
||||
/**
|
||||
* Associates a process with an existing job
|
||||
*
|
||||
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx
|
||||
*
|
||||
* @param job job handle
|
||||
* @param process process handle
|
||||
* @return true if the function succeeds
|
||||
*/
|
||||
native boolean AssignProcessToJobObject(Pointer job, Pointer process);
|
||||
|
||||
/**
|
||||
* Basic limit information for a job object
|
||||
*
|
||||
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx
|
||||
*/
|
||||
public static class JOBOBJECT_BASIC_LIMIT_INFORMATION extends Structure implements Structure.ByReference {
|
||||
public long PerProcessUserTimeLimit;
|
||||
public long PerJobUserTimeLimit;
|
||||
public int LimitFlags;
|
||||
public SizeT MinimumWorkingSetSize;
|
||||
public SizeT MaximumWorkingSetSize;
|
||||
public int ActiveProcessLimit;
|
||||
public Pointer Affinity;
|
||||
public int PriorityClass;
|
||||
public int SchedulingClass;
|
||||
|
||||
@Override
|
||||
protected List<String> getFieldOrder() {
|
||||
return Arrays.asList(new String[] {
|
||||
"PerProcessUserTimeLimit", "PerJobUserTimeLimit", "LimitFlags", "MinimumWorkingSetSize",
|
||||
"MaximumWorkingSetSize", "ActiveProcessLimit", "Affinity", "PriorityClass", "SchedulingClass"
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Constant for JOBOBJECT_BASIC_LIMIT_INFORMATION in Query/Set InformationJobObject
|
||||
*/
|
||||
static final int JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS = 2;
|
||||
|
||||
/**
|
||||
* Constant for LimitFlags, indicating a process limit has been set
|
||||
*/
|
||||
static final int JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8;
|
||||
|
||||
/**
|
||||
* Get job limit and state information
|
||||
*
|
||||
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx
|
||||
*
|
||||
* @param job job handle
|
||||
* @param infoClass information class constant
|
||||
* @param info pointer to information structure
|
||||
* @param infoLength size of information structure
|
||||
* @param returnLength length of data written back to structure (or null if not wanted)
|
||||
* @return true if the function succeeds
|
||||
*/
|
||||
native boolean QueryInformationJobObject(Pointer job, int infoClass, Pointer info, int infoLength, Pointer returnLength);
|
||||
|
||||
/**
|
||||
* Set job limit and state information
|
||||
*
|
||||
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx
|
||||
*
|
||||
* @param job job handle
|
||||
* @param infoClass information class constant
|
||||
* @param info pointer to information structure
|
||||
* @param infoLength size of information structure
|
||||
* @return true if the function succeeds
|
||||
*/
|
||||
native boolean SetInformationJobObject(Pointer job, int infoClass, Pointer info, int infoLength);
|
||||
}
|
||||
|
@ -47,7 +47,7 @@ import java.util.Map;
|
||||
* Installs a limited form of secure computing mode,
|
||||
* to filter system calls to block process execution.
|
||||
* <p>
|
||||
* This is only supported on the Linux, Solaris, FreeBSD, OpenBSD, and Mac OS X operating systems.
|
||||
* This is supported on Linux, Solaris, FreeBSD, OpenBSD, Mac OS X, and Windows.
|
||||
* <p>
|
||||
* On Linux it currently supports amd64 and i386 architectures, requires Linux kernel 3.5 or above, and requires
|
||||
* {@code CONFIG_SECCOMP} and {@code CONFIG_SECCOMP_FILTER} compiled into the kernel.
|
||||
@ -80,6 +80,8 @@ import java.util.Map;
|
||||
* <li>{@code process-exec}</li>
|
||||
* </ul>
|
||||
* <p>
|
||||
* On Windows, process creation is restricted with {@code SetInformationJobObject/ActiveProcessLimit}.
|
||||
* <p>
|
||||
* This is not intended as a sandbox. It is another level of security, mostly intended to annoy
|
||||
* security researchers and make their lives more difficult in achieving "remote execution" exploits.
|
||||
* @see <a href="http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt">
|
||||
@ -329,7 +331,8 @@ final class Seccomp {
|
||||
case 1: break; // already set by caller
|
||||
default:
|
||||
int errno = Native.getLastError();
|
||||
if (errno == ENOSYS) {
|
||||
if (errno == EINVAL) {
|
||||
// friendly error, this will be the typical case for an old kernel
|
||||
throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
|
||||
} else {
|
||||
throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno));
|
||||
@ -561,6 +564,48 @@ final class Seccomp {
|
||||
logger.debug("BSD RLIMIT_NPROC initialization successful");
|
||||
}
|
||||
|
||||
// windows impl via job ActiveProcessLimit
|
||||
|
||||
static void windowsImpl() {
|
||||
if (!Constants.WINDOWS) {
|
||||
throw new IllegalStateException("bug: should not be trying to initialize ActiveProcessLimit for an unsupported OS");
|
||||
}
|
||||
|
||||
JNAKernel32Library lib = JNAKernel32Library.getInstance();
|
||||
|
||||
// create a new Job
|
||||
Pointer job = lib.CreateJobObjectW(null, null);
|
||||
if (job == null) {
|
||||
throw new UnsupportedOperationException("CreateJobObject: " + Native.getLastError());
|
||||
}
|
||||
|
||||
try {
|
||||
// retrieve the current basic limits of the job
|
||||
int clazz = JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS;
|
||||
JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION limits = new JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION();
|
||||
limits.write();
|
||||
if (!lib.QueryInformationJobObject(job, clazz, limits.getPointer(), limits.size(), null)) {
|
||||
throw new UnsupportedOperationException("QueryInformationJobObject: " + Native.getLastError());
|
||||
}
|
||||
limits.read();
|
||||
// modify the number of active processes to be 1 (exactly the one process we will add to the job).
|
||||
limits.ActiveProcessLimit = 1;
|
||||
limits.LimitFlags = JNAKernel32Library.JOB_OBJECT_LIMIT_ACTIVE_PROCESS;
|
||||
limits.write();
|
||||
if (!lib.SetInformationJobObject(job, clazz, limits.getPointer(), limits.size())) {
|
||||
throw new UnsupportedOperationException("SetInformationJobObject: " + Native.getLastError());
|
||||
}
|
||||
// assign ourselves to the job
|
||||
if (!lib.AssignProcessToJobObject(job, lib.GetCurrentProcess())) {
|
||||
throw new UnsupportedOperationException("AssignProcessToJobObject: " + Native.getLastError());
|
||||
}
|
||||
} finally {
|
||||
lib.CloseHandle(job);
|
||||
}
|
||||
|
||||
logger.debug("Windows ActiveProcessLimit initialization successful");
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to drop the capability to execute for the process.
|
||||
* <p>
|
||||
@ -581,6 +626,9 @@ final class Seccomp {
|
||||
} else if (Constants.FREE_BSD || OPENBSD) {
|
||||
bsdImpl();
|
||||
return 1;
|
||||
} else if (Constants.WINDOWS) {
|
||||
windowsImpl();
|
||||
return 1;
|
||||
} else {
|
||||
throw new UnsupportedOperationException("syscall filtering not supported for OS: '" + Constants.OS_NAME + "'");
|
||||
}
|
||||
|
@ -131,34 +131,48 @@ final class Security {
|
||||
@SuppressForbidden(reason = "proper use of URL")
|
||||
static Map<String,Policy> getPluginPermissions(Environment environment) throws IOException, NoSuchAlgorithmException {
|
||||
Map<String,Policy> map = new HashMap<>();
|
||||
// collect up lists of plugins and modules
|
||||
List<Path> pluginsAndModules = new ArrayList<>();
|
||||
if (Files.exists(environment.pluginsFile())) {
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.pluginsFile())) {
|
||||
for (Path plugin : stream) {
|
||||
Path policyFile = plugin.resolve(PluginInfo.ES_PLUGIN_POLICY);
|
||||
if (Files.exists(policyFile)) {
|
||||
// first get a list of URLs for the plugins' jars:
|
||||
// we resolve symlinks so map is keyed on the normalize codebase name
|
||||
List<URL> codebases = new ArrayList<>();
|
||||
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
|
||||
for (Path jar : jarStream) {
|
||||
codebases.add(jar.toRealPath().toUri().toURL());
|
||||
}
|
||||
}
|
||||
|
||||
// parse the plugin's policy file into a set of permissions
|
||||
Policy policy = readPolicy(policyFile.toUri().toURL(), codebases.toArray(new URL[codebases.size()]));
|
||||
|
||||
// consult this policy for each of the plugin's jars:
|
||||
for (URL url : codebases) {
|
||||
if (map.put(url.getFile(), policy) != null) {
|
||||
// just be paranoid ok?
|
||||
throw new IllegalStateException("per-plugin permissions already granted for jar file: " + url);
|
||||
}
|
||||
}
|
||||
pluginsAndModules.add(plugin);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (Files.exists(environment.modulesFile())) {
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.modulesFile())) {
|
||||
for (Path plugin : stream) {
|
||||
pluginsAndModules.add(plugin);
|
||||
}
|
||||
}
|
||||
}
|
||||
// now process each one
|
||||
for (Path plugin : pluginsAndModules) {
|
||||
Path policyFile = plugin.resolve(PluginInfo.ES_PLUGIN_POLICY);
|
||||
if (Files.exists(policyFile)) {
|
||||
// first get a list of URLs for the plugins' jars:
|
||||
// we resolve symlinks so map is keyed on the normalize codebase name
|
||||
List<URL> codebases = new ArrayList<>();
|
||||
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
|
||||
for (Path jar : jarStream) {
|
||||
codebases.add(jar.toRealPath().toUri().toURL());
|
||||
}
|
||||
}
|
||||
|
||||
// parse the plugin's policy file into a set of permissions
|
||||
Policy policy = readPolicy(policyFile.toUri().toURL(), codebases.toArray(new URL[codebases.size()]));
|
||||
|
||||
// consult this policy for each of the plugin's jars:
|
||||
for (URL url : codebases) {
|
||||
if (map.put(url.getFile(), policy) != null) {
|
||||
// just be paranoid ok?
|
||||
throw new IllegalStateException("per-plugin permissions already granted for jar file: " + url);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Collections.unmodifiableMap(map);
|
||||
}
|
||||
|
||||
@ -228,6 +242,7 @@ final class Security {
|
||||
// read-only dirs
|
||||
addPath(policy, "path.home", environment.binFile(), "read,readlink");
|
||||
addPath(policy, "path.home", environment.libFile(), "read,readlink");
|
||||
addPath(policy, "path.home", environment.modulesFile(), "read,readlink");
|
||||
addPath(policy, "path.plugins", environment.pluginsFile(), "read,readlink");
|
||||
addPath(policy, "path.conf", environment.configFile(), "read,readlink");
|
||||
addPath(policy, "path.scripts", environment.scriptsFile(), "read,readlink");
|
||||
|
@ -125,7 +125,7 @@ public class TransportClient extends AbstractClient {
|
||||
.put(CLIENT_TYPE_SETTING, CLIENT_TYPE)
|
||||
.build();
|
||||
|
||||
PluginsService pluginsService = new PluginsService(settings, null, pluginClasses);
|
||||
PluginsService pluginsService = new PluginsService(settings, null, null, pluginClasses);
|
||||
this.settings = pluginsService.updatedSettings();
|
||||
|
||||
Version version = Version.CURRENT;
|
||||
|
@ -0,0 +1,54 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
public interface AckedClusterStateTaskListener extends ClusterStateTaskListener {
|
||||
|
||||
/**
|
||||
* Called to determine which nodes the acknowledgement is expected from
|
||||
*
|
||||
* @param discoveryNode a node
|
||||
* @return true if the node is expected to send ack back, false otherwise
|
||||
*/
|
||||
boolean mustAck(DiscoveryNode discoveryNode);
|
||||
|
||||
/**
|
||||
* Called once all the nodes have acknowledged the cluster state update request. Must be
|
||||
* very lightweight execution, since it gets executed on the cluster service thread.
|
||||
*
|
||||
* @param t optional error that might have been thrown
|
||||
*/
|
||||
void onAllNodesAcked(@Nullable Throwable t);
|
||||
|
||||
/**
|
||||
* Called once the acknowledgement timeout defined by
|
||||
* {@link AckedClusterStateUpdateTask#ackTimeout()} has expired
|
||||
*/
|
||||
void onAckTimeout();
|
||||
|
||||
/**
|
||||
* Acknowledgement timeout, maximum time interval to wait for acknowledgements
|
||||
*/
|
||||
TimeValue ackTimeout();
|
||||
|
||||
}
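A minimal sketch of an implementation of this interface; waiting for an ack from every node and the 30 second ack timeout are illustrative choices, not requirements of the API:

----------------------------------------------------------------------
// Sketch only: expects an ack from every node and gives up after 30 seconds.
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;

public class SimpleAckListener implements AckedClusterStateTaskListener {

    @Override
    public boolean mustAck(DiscoveryNode discoveryNode) {
        return true; // wait for every node
    }

    @Override
    public void onAllNodesAcked(@Nullable Throwable t) {
        // t is non-null if at least one node reported a failure while acking
    }

    @Override
    public void onAckTimeout() {
        // not every expected ack arrived within ackTimeout()
    }

    @Override
    public TimeValue ackTimeout() {
        return TimeValue.timeValueSeconds(30);
    }

    @Override
    public void onFailure(String source, Throwable t) {
        // inherited from ClusterStateTaskListener: the update itself failed
    }
}
----------------------------------------------------------------------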
@ -22,18 +22,24 @@ import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.ack.AckedRequest;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
/**
|
||||
* An extension interface to {@link ClusterStateUpdateTask} that allows to be notified when
|
||||
* all the nodes have acknowledged a cluster state update request
|
||||
*/
|
||||
public abstract class AckedClusterStateUpdateTask<Response> extends ClusterStateUpdateTask {
|
||||
public abstract class AckedClusterStateUpdateTask<Response> extends ClusterStateUpdateTask implements AckedClusterStateTaskListener {
|
||||
|
||||
private final ActionListener<Response> listener;
|
||||
private final AckedRequest request;
|
||||
|
||||
protected AckedClusterStateUpdateTask(AckedRequest request, ActionListener<Response> listener) {
|
||||
this(Priority.NORMAL, request, listener);
|
||||
}
|
||||
|
||||
protected AckedClusterStateUpdateTask(Priority priority, AckedRequest request, ActionListener<Response> listener) {
|
||||
super(priority);
|
||||
this.listener = listener;
|
||||
this.request = request;
|
||||
}
|
||||
|
@ -176,7 +176,6 @@ public class ClusterModule extends AbstractModule {
|
||||
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, Validator.TIME_NON_NEGATIVE);
|
||||
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
|
||||
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
|
||||
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_SIZE_PER_SEC, Validator.BYTES_SIZE);
|
||||
registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR);
|
||||
registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER);
|
||||
registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER);
|
||||
|
@ -24,7 +24,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.OperationRouting;
|
||||
import org.elasticsearch.cluster.service.PendingClusterTask;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
@ -101,12 +100,35 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
|
||||
void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener);
|
||||
|
||||
/**
|
||||
* Submits a task that will update the cluster state.
|
||||
* Submits a cluster state update task; submitted updates will be
|
||||
* batched across the same instance of executor. The exact batching
|
||||
* semantics depend on the underlying implementation but a rough
|
||||
* guideline is that if the update task is submitted while there
|
||||
* are pending update tasks for the same executor, these update
|
||||
* tasks will all be executed on the executor in a single batch
|
||||
*
|
||||
* @param source the source of the cluster state update task
|
||||
* @param task the state needed for the cluster state update task
|
||||
* @param config the cluster state update task configuration
|
||||
* @param executor the cluster state update task executor; tasks
|
||||
* that share the same executor will be executed
|
||||
* batches on this executor
|
||||
* @param listener callback after the cluster state update task
|
||||
* completes
|
||||
* @param <T> the type of the cluster state update task state
|
||||
*/
|
||||
void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask);
|
||||
<T> void submitStateUpdateTask(final String source, final T task,
|
||||
final ClusterStateTaskConfig config,
|
||||
final ClusterStateTaskExecutor<T> executor,
|
||||
final ClusterStateTaskListener listener);
|
||||
|
||||
/**
|
||||
* Submits a task that will update the cluster state (the task has a default priority of {@link Priority#NORMAL}).
|
||||
* Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener)},
|
||||
* submitted updates will not be batched.
|
||||
*
|
||||
* @param source the source of the cluster state update task
|
||||
* @param updateTask the full context for the cluster state update
|
||||
* task
|
||||
*/
|
||||
void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask);
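A sketch of how a caller might use the new batching variant of submitStateUpdateTask declared above. The String task type, the executor instance and the source text are assumptions made for this example:

----------------------------------------------------------------------
// Sketch only: submit one task to the batching variant.
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue;

public class SubmitExample {
    public static void submit(ClusterService clusterService, ClusterStateTaskExecutor<String> executor) {
        clusterService.submitStateUpdateTask(
            "refresh-mapping [my-index]",        // source shown in pending task listings
            "my-index",                          // per-task state, batched per executor instance
            ClusterStateTaskConfig.build(Priority.NORMAL, TimeValue.timeValueSeconds(30)),
            executor,
            new ClusterStateTaskListener() {
                @Override
                public void onFailure(String source, Throwable t) {
                    // the task timed out or its batch failed
                }
            });
    }
}
----------------------------------------------------------------------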
@ -19,9 +19,9 @@
|
||||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
@ -469,6 +469,16 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
||||
}
|
||||
builder.endArray();
|
||||
|
||||
builder.startObject(IndexMetaData.KEY_ACTIVE_ALLOCATIONS);
|
||||
for (IntObjectCursor<Set<String>> cursor : indexMetaData.getActiveAllocationIds()) {
|
||||
builder.startArray(String.valueOf(cursor.key));
|
||||
for (String allocationId : cursor.value) {
|
||||
builder.value(allocationId);
|
||||
}
|
||||
builder.endArray();
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
@ -584,6 +594,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
||||
|
||||
public Builder routingResult(RoutingAllocation.Result routingResult) {
|
||||
this.routingTable = routingResult.routingTable();
|
||||
this.metaData = routingResult.metaData();
|
||||
return this;
|
||||
}
|
||||
|
||||
@ -759,7 +770,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
||||
nodes = after.nodes.diff(before.nodes);
|
||||
metaData = after.metaData.diff(before.metaData);
|
||||
blocks = after.blocks.diff(before.blocks);
|
||||
customs = DiffableUtils.diff(before.customs, after.customs);
|
||||
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
|
||||
}
|
||||
|
||||
public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException {
|
||||
@ -771,14 +782,15 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
||||
nodes = proto.nodes.readDiffFrom(in);
|
||||
metaData = proto.metaData.readDiffFrom(in);
|
||||
blocks = proto.blocks.readDiffFrom(in);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() {
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
|
||||
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
|
||||
@Override
|
||||
public Custom readFrom(StreamInput in, String key) throws IOException {
|
||||
public Custom read(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
|
||||
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readDiffFrom(in);
|
||||
}
|
||||
});
|
||||
|
@ -0,0 +1,92 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
/**
|
||||
* Cluster state update task configuration for timeout and priority
|
||||
*/
|
||||
public interface ClusterStateTaskConfig {
|
||||
/**
|
||||
* The timeout for this cluster state update task configuration. If
|
||||
* the cluster state update task isn't processed within this
|
||||
* timeout, the associated {@link ClusterStateTaskListener#onFailure(String, Throwable)}
|
||||
* is invoked.
|
||||
*
|
||||
* @return the timeout, or null if one is not set
|
||||
*/
|
||||
@Nullable
|
||||
TimeValue timeout();
|
||||
|
||||
/**
|
||||
* The {@link Priority} for this cluster state update task configuration.
|
||||
*
|
||||
* @return the priority
|
||||
*/
|
||||
Priority priority();
|
||||
|
||||
/**
|
||||
* Build a cluster state update task configuration with the
|
||||
* specified {@link Priority} and no timeout.
|
||||
*
|
||||
* @param priority the priority for the associated cluster state
|
||||
* update task
|
||||
* @return the resulting cluster state update task configuration
|
||||
*/
|
||||
static ClusterStateTaskConfig build(Priority priority) {
|
||||
return new Basic(priority, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a cluster state update task configuration with the
|
||||
* specified {@link Priority} and timeout.
|
||||
*
|
||||
* @param priority the priority for the associated cluster state
|
||||
* update task
|
||||
* @param timeout the timeout for the associated cluster state
|
||||
* update task
|
||||
* @return the result cluster state update task configuration
|
||||
*/
|
||||
static ClusterStateTaskConfig build(Priority priority, TimeValue timeout) {
|
||||
return new Basic(priority, timeout);
|
||||
}
|
||||
|
||||
class Basic implements ClusterStateTaskConfig {
|
||||
final TimeValue timeout;
|
||||
final Priority priority;
|
||||
|
||||
public Basic(Priority priority, TimeValue timeout) {
|
||||
this.timeout = timeout;
|
||||
this.priority = priority;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
return timeout;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Priority priority() {
|
||||
return priority;
|
||||
}
|
||||
}
|
||||
}
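A minimal sketch of the two factory methods shown above; the priorities and the 30 second timeout are illustrative:

----------------------------------------------------------------------
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue;

public class ConfigExamples {
    // no timeout: the task stays queued until it is executed
    static final ClusterStateTaskConfig URGENT = ClusterStateTaskConfig.build(Priority.URGENT);

    // with timeout: the listener's onFailure(String, Throwable) is invoked
    // if the task is not processed within 30 seconds
    static final ClusterStateTaskConfig NORMAL_WITH_TIMEOUT =
            ClusterStateTaskConfig.build(Priority.NORMAL, TimeValue.timeValueSeconds(30));
}
----------------------------------------------------------------------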
@ -0,0 +1,132 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
public interface ClusterStateTaskExecutor<T> {
|
||||
/**
|
||||
* Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state
|
||||
* should be changed.
|
||||
*/
|
||||
BatchResult<T> execute(ClusterState currentState, List<T> tasks) throws Exception;
|
||||
|
||||
/**
|
||||
* indicates whether this task should only run if current node is master
|
||||
*/
|
||||
default boolean runOnlyOnMaster() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents the result of a batched execution of cluster state update tasks
|
||||
* @param <T> the type of the cluster state update task
|
||||
*/
|
||||
class BatchResult<T> {
|
||||
final public ClusterState resultingState;
|
||||
final public Map<T, TaskResult> executionResults;
|
||||
|
||||
/**
|
||||
* Construct an execution result instance with a correspondence between the tasks and their execution result
|
||||
* @param resultingState the resulting cluster state
|
||||
* @param executionResults the correspondence between tasks and their outcome
|
||||
*/
|
||||
BatchResult(ClusterState resultingState, Map<T, TaskResult> executionResults) {
|
||||
this.resultingState = resultingState;
|
||||
this.executionResults = executionResults;
|
||||
}
|
||||
|
||||
public static <T> Builder<T> builder() {
|
||||
return new Builder<>();
|
||||
}
|
||||
|
||||
public static class Builder<T> {
|
||||
private final Map<T, TaskResult> executionResults = new IdentityHashMap<>();
|
||||
|
||||
public Builder<T> success(T task) {
|
||||
return result(task, TaskResult.success());
|
||||
}
|
||||
|
||||
public Builder<T> successes(Iterable<T> tasks) {
|
||||
for (T task : tasks) {
|
||||
success(task);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder<T> failure(T task, Throwable t) {
|
||||
return result(task, TaskResult.failure(t));
|
||||
}
|
||||
|
||||
public Builder<T> failures(Iterable<T> tasks, Throwable t) {
|
||||
for (T task : tasks) {
|
||||
failure(task, t);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
private Builder<T> result(T task, TaskResult executionResult) {
|
||||
executionResults.put(task, executionResult);
|
||||
return this;
|
||||
}
|
||||
|
||||
public BatchResult<T> build(ClusterState resultingState) {
|
||||
return new BatchResult<>(resultingState, executionResults);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
final class TaskResult {
|
||||
private final Throwable failure;
|
||||
|
||||
private static final TaskResult SUCCESS = new TaskResult(null);
|
||||
|
||||
public static TaskResult success() {
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
public static TaskResult failure(Throwable failure) {
|
||||
return new TaskResult(failure);
|
||||
}
|
||||
|
||||
private TaskResult(Throwable failure) {
|
||||
this.failure = failure;
|
||||
}
|
||||
|
||||
public boolean isSuccess() {
|
||||
return failure == null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle the execution result with the provided consumers
|
||||
* @param onSuccess handler to invoke on success
|
||||
* @param onFailure handler to invoke on failure; the throwable passed through will not be null
|
||||
*/
|
||||
public void handle(Runnable onSuccess, Consumer<Throwable> onFailure) {
|
||||
if (failure == null) {
|
||||
onSuccess.run();
|
||||
} else {
|
||||
onFailure.accept(failure);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
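A sketch of a trivial executor over String tasks (the task type and class name are assumptions); it returns the same ClusterState instance to signal that nothing changed and marks every task in the batch as successful:

----------------------------------------------------------------------
import java.util.List;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;

public class NoopBatchExecutor implements ClusterStateTaskExecutor<String> {

    @Override
    public BatchResult<String> execute(ClusterState currentState, List<String> tasks) throws Exception {
        // a real executor would derive a new ClusterState from currentState here;
        // returning the same instance signals "no change" to the cluster service
        return BatchResult.<String>builder()
                .successes(tasks)
                .build(currentState);
    }

    // runOnlyOnMaster() keeps its default of true
}
----------------------------------------------------------------------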
@ -16,22 +16,28 @@
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.index.fieldvisitor;
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import java.util.List;
|
||||
|
||||
import java.io.IOException;
|
||||
public interface ClusterStateTaskListener {
|
||||
|
||||
/**
|
||||
*/
|
||||
public class AllFieldsVisitor extends FieldsVisitor {
|
||||
/**
|
||||
* A callback called when execute fails.
|
||||
*/
|
||||
void onFailure(String source, Throwable t);
|
||||
|
||||
public AllFieldsVisitor() {
|
||||
super(true);
|
||||
/**
|
||||
* called when the task was rejected because the local node is no longer master
|
||||
*/
|
||||
default void onNoLongerMaster(String source) {
|
||||
onFailure(source, new NotMasterException("no longer master. source: [" + source + "]"));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Status needsField(FieldInfo fieldInfo) throws IOException {
|
||||
return Status.YES;
|
||||
/**
|
||||
* Called when the result of the {@link ClusterStateTaskExecutor#execute(ClusterState, List)} has been processed
|
||||
* properly by all listeners.
|
||||
*/
|
||||
default void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
}
|
||||
}
|
@ -20,13 +20,31 @@
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* A task that can update the cluster state.
|
||||
*/
|
||||
abstract public class ClusterStateUpdateTask {
|
||||
abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<ClusterStateUpdateTask>, ClusterStateTaskListener {
|
||||
|
||||
final private Priority priority;
|
||||
|
||||
public ClusterStateUpdateTask() {
|
||||
this(Priority.NORMAL);
|
||||
}
|
||||
|
||||
public ClusterStateUpdateTask(Priority priority) {
|
||||
this.priority = priority;
|
||||
}
|
||||
|
||||
@Override
|
||||
final public BatchResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
|
||||
ClusterState result = execute(currentState);
|
||||
return BatchResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the cluster state based on the current state. Return the *same instance* if no state
|
||||
@ -39,28 +57,6 @@ abstract public class ClusterStateUpdateTask {
|
||||
*/
|
||||
abstract public void onFailure(String source, Throwable t);
|
||||
|
||||
|
||||
/**
|
||||
* indicates whether this task should only run if current node is master
|
||||
*/
|
||||
public boolean runOnlyOnMaster() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* called when the task was rejected because the local node is no longer master
|
||||
*/
|
||||
public void onNoLongerMaster(String source) {
|
||||
onFailure(source, new NotMasterException("no longer master. source: [" + source + "]"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when the result of the {@link #execute(ClusterState)} have been processed
|
||||
* properly by all listeners.
|
||||
*/
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
}
|
||||
|
||||
/**
|
||||
* If the cluster state update task wasn't processed by the provided timeout, call
|
||||
* {@link #onFailure(String, Throwable)}. May return null to indicate no timeout is needed (default).
|
||||
@ -70,5 +66,8 @@ abstract public class ClusterStateUpdateTask {
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Priority priority() {
|
||||
return priority;
|
||||
}
|
||||
}
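A sketch of the non-batched path after this change: a subclass still only provides execute and onFailure, as before. The class name and priority below are illustrative:

----------------------------------------------------------------------
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.common.Priority;

public class NoopUpdateTask extends ClusterStateUpdateTask {

    public NoopUpdateTask() {
        super(Priority.HIGH);
    }

    @Override
    public ClusterState execute(ClusterState currentState) {
        // return the same instance if nothing needs to change
        return currentState;
    }

    @Override
    public void onFailure(String source, Throwable t) {
        // execute() threw, or the task was rejected or timed out
    }
}
----------------------------------------------------------------------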
@ -29,7 +29,7 @@ import java.io.IOException;
|
||||
public interface Diff<T> {
|
||||
|
||||
/**
|
||||
* Applies difference to the specified part and retunrs the resulted part
|
||||
* Applies difference to the specified part and returns the resulted part
|
||||
*/
|
||||
T apply(T part);
|
||||
|
||||
|
@ -19,263 +19,630 @@
|
||||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.IntCursor;
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
public final class DiffableUtils {
|
||||
private DiffableUtils() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map key serializer for String keys
|
||||
*/
|
||||
public static KeySerializer<String> getStringKeySerializer() {
|
||||
return StringKeySerializer.INSTANCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map key serializer for Integer keys. Encodes as Int.
|
||||
*/
|
||||
public static KeySerializer<Integer> getIntKeySerializer() {
|
||||
return IntKeySerializer.INSTANCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map key serializer for Integer keys. Encodes as VInt.
|
||||
*/
|
||||
public static KeySerializer<Integer> getVIntKeySerializer() {
|
||||
return VIntKeySerializer.INSTANCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenMaps of Diffable objects
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> diff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) {
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenMapDiff<>(before, after);
|
||||
return new ImmutableOpenMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenMaps of non-diffable objects
|
||||
*/
|
||||
public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenIntMaps of Diffable objects
|
||||
*/
|
||||
public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenIntMaps of non-diffable objects
|
||||
*/
|
||||
public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer, NonDiffableValueSerializer<Integer, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two Maps of Diffable objects.
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<Map<String, T>> diff(Map<String, T> before, Map<String, T> after) {
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer) {
|
||||
assert after != null && before != null;
|
||||
return new JdkMapDiff<>(before, after);
|
||||
return new JdkMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two Maps of non-diffable objects
|
||||
*/
|
||||
public static <K, T> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new JdkMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two ImmutableOpenMaps
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keyedReader);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two Maps.
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
|
||||
return new JdkMapDiff<>(in, keyedReader);
|
||||
public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two ImmutableOpenMaps
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, T proto) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, new PrototypeReader<>(proto));
|
||||
public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) throws IOException {
|
||||
return new ImmutableOpenIntMapDiff<>(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two Maps.
|
||||
* Loads an object that represents difference between two Maps of Diffable objects
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, T proto) throws IOException {
|
||||
return new JdkMapDiff<>(in, new PrototypeReader<>(proto));
|
||||
public static <K, T> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
return new JdkMapDiff<>(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* A reader that can deserialize an object. The reader can select the deserialization type based on the key. It's
|
||||
* used in custom metadata deserialization.
|
||||
* Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public interface KeyedReader<T> {
|
||||
|
||||
/**
|
||||
* reads an object of the type T from the stream input
|
||||
*/
|
||||
T readFrom(StreamInput in, String key) throws IOException;
|
||||
|
||||
/**
|
||||
* reads an object that respresents differences between two objects with the type T from the stream input
|
||||
*/
|
||||
Diff<T> readDiffFrom(StreamInput in, String key) throws IOException;
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
}
|
||||
|
||||
/**
|
||||
* Implementation of the KeyedReader that is using a prototype object for reading operations
|
||||
*
|
||||
* Note: this implementation is ignoring the key.
|
||||
* Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public static class PrototypeReader<T extends Diffable<T>> implements KeyedReader<T> {
|
||||
private T proto;
|
||||
|
||||
public PrototypeReader(T proto) {
|
||||
this.proto = proto;
|
||||
}
|
||||
|
||||
@Override
|
||||
public T readFrom(StreamInput in, String key) throws IOException {
|
||||
return proto.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<T> readDiffFrom(StreamInput in, String key) throws IOException {
|
||||
return proto.readDiffFrom(in);
|
||||
}
|
||||
public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, T proto) throws IOException {
|
||||
return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two Maps of Diffable objects.
|
||||
* Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
|
||||
return new JdkMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two Maps of (possibly diffable) objects.
|
||||
*
|
||||
* @param <T> the diffable object
|
||||
*/
|
||||
private static class JdkMapDiff<T extends Diffable<T>> extends MapDiff<T, Map<String, T>> {
|
||||
private static class JdkMapDiff<K, T> extends MapDiff<K, T, Map<K, T>> {
|
||||
|
||||
protected JdkMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
|
||||
super(in, reader);
|
||||
protected JdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
super(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
public JdkMapDiff(Map<String, T> before, Map<String, T> after) {
|
||||
public JdkMapDiff(Map<K, T> before, Map<K, T> after,
|
||||
KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
super(keySerializer, valueSerializer);
|
||||
assert after != null && before != null;
|
||||
for (String key : before.keySet()) {
|
||||
|
||||
for (K key : before.keySet()) {
|
||||
if (!after.containsKey(key)) {
|
||||
deletes.add(key);
|
||||
}
|
||||
}
|
||||
for (Map.Entry<String, T> partIter : after.entrySet()) {
|
||||
|
||||
for (Map.Entry<K, T> partIter : after.entrySet()) {
|
||||
T beforePart = before.get(partIter.getKey());
|
||||
if (beforePart == null) {
|
||||
adds.put(partIter.getKey(), partIter.getValue());
|
||||
upserts.put(partIter.getKey(), partIter.getValue());
|
||||
} else if (partIter.getValue().equals(beforePart) == false) {
|
||||
diffs.put(partIter.getKey(), partIter.getValue().diff(beforePart));
|
||||
if (valueSerializer.supportsDiffableValues()) {
|
||||
diffs.put(partIter.getKey(), valueSerializer.diff(partIter.getValue(), beforePart));
|
||||
} else {
|
||||
upserts.put(partIter.getKey(), partIter.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, T> apply(Map<String, T> map) {
|
||||
Map<String, T> builder = new HashMap<>();
|
||||
public Map<K, T> apply(Map<K, T> map) {
|
||||
Map<K, T> builder = new HashMap<>();
|
||||
builder.putAll(map);
|
||||
|
||||
for (String part : deletes) {
|
||||
for (K part : deletes) {
|
||||
builder.remove(part);
|
||||
}
|
||||
|
||||
for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) {
|
||||
for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
|
||||
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
|
||||
}
|
||||
|
||||
for (Map.Entry<String, T> additon : adds.entrySet()) {
|
||||
builder.put(additon.getKey(), additon.getValue());
|
||||
for (Map.Entry<K, T> upsert : upserts.entrySet()) {
|
||||
builder.put(upsert.getKey(), upsert.getValue());
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two ImmutableOpenMap of diffable objects
|
||||
* Represents differences between two ImmutableOpenMap of (possibly diffable) objects
|
||||
*
|
||||
* @param <T> the diffable object
|
||||
* @param <T> the object type
|
||||
*/
|
||||
private static class ImmutableOpenMapDiff<T extends Diffable<T>> extends MapDiff<T, ImmutableOpenMap<String, T>> {
|
||||
private static class ImmutableOpenMapDiff<K, T> extends MapDiff<K, T, ImmutableOpenMap<K, T>> {
|
||||
|
||||
protected ImmutableOpenMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
|
||||
super(in, reader);
|
||||
protected ImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
super(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
public ImmutableOpenMapDiff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) {
|
||||
public ImmutableOpenMapDiff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after,
|
||||
KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
super(keySerializer, valueSerializer);
|
||||
assert after != null && before != null;
|
||||
for (ObjectCursor<String> key : before.keys()) {
|
||||
|
||||
for (ObjectCursor<K> key : before.keys()) {
|
||||
if (!after.containsKey(key.value)) {
|
||||
deletes.add(key.value);
|
||||
}
|
||||
}
|
||||
for (ObjectObjectCursor<String, T> partIter : after) {
|
||||
|
||||
for (ObjectObjectCursor<K, T> partIter : after) {
|
||||
T beforePart = before.get(partIter.key);
|
||||
if (beforePart == null) {
|
||||
adds.put(partIter.key, partIter.value);
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
} else if (partIter.value.equals(beforePart) == false) {
|
||||
diffs.put(partIter.key, partIter.value.diff(beforePart));
|
||||
if (valueSerializer.supportsDiffableValues()) {
|
||||
diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
|
||||
} else {
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImmutableOpenMap<String, T> apply(ImmutableOpenMap<String, T> map) {
|
||||
ImmutableOpenMap.Builder<String, T> builder = ImmutableOpenMap.builder();
|
||||
public ImmutableOpenMap<K, T> apply(ImmutableOpenMap<K, T> map) {
|
||||
ImmutableOpenMap.Builder<K, T> builder = ImmutableOpenMap.builder();
|
||||
builder.putAll(map);
|
||||
|
||||
for (String part : deletes) {
|
||||
for (K part : deletes) {
|
||||
builder.remove(part);
|
||||
}
|
||||
|
||||
for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) {
|
||||
for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
|
||||
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
|
||||
}
|
||||
|
||||
for (Map.Entry<String, T> additon : adds.entrySet()) {
|
||||
builder.put(additon.getKey(), additon.getValue());
|
||||
for (Map.Entry<K, T> upsert : upserts.entrySet()) {
|
||||
builder.put(upsert.getKey(), upsert.getValue());
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two maps of diffable objects
|
||||
* Represents differences between two ImmutableOpenIntMap of (possibly diffable) objects
|
||||
*
|
||||
* This class is used as base class for different map implementations
|
||||
*
|
||||
* @param <T> the diffable object
|
||||
* @param <T> the object type
|
||||
*/
|
||||
private static abstract class MapDiff<T extends Diffable<T>, M> implements Diff<M> {
|
||||
private static class ImmutableOpenIntMapDiff<T> extends MapDiff<Integer, T, ImmutableOpenIntMap<T>> {
|
||||
|
||||
protected final List<String> deletes;
|
||||
protected final Map<String, Diff<T>> diffs;
|
||||
protected final Map<String, T> adds;
|
||||
|
||||
protected MapDiff() {
|
||||
deletes = new ArrayList<>();
|
||||
diffs = new HashMap<>();
|
||||
adds = new HashMap<>();
|
||||
protected ImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) throws IOException {
|
||||
super(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
protected MapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
|
||||
public ImmutableOpenIntMapDiff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after,
|
||||
KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) {
|
||||
super(keySerializer, valueSerializer);
|
||||
assert after != null && before != null;
|
||||
|
||||
for (IntCursor key : before.keys()) {
|
||||
if (!after.containsKey(key.value)) {
|
||||
deletes.add(key.value);
|
||||
}
|
||||
}
|
||||
|
||||
for (IntObjectCursor<T> partIter : after) {
|
||||
T beforePart = before.get(partIter.key);
|
||||
if (beforePart == null) {
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
} else if (partIter.value.equals(beforePart) == false) {
|
||||
if (valueSerializer.supportsDiffableValues()) {
|
||||
diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
|
||||
} else {
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImmutableOpenIntMap<T> apply(ImmutableOpenIntMap<T> map) {
|
||||
ImmutableOpenIntMap.Builder<T> builder = ImmutableOpenIntMap.builder();
|
||||
builder.putAll(map);
|
||||
|
||||
for (Integer part : deletes) {
|
||||
builder.remove(part);
|
||||
}
|
||||
|
||||
for (Map.Entry<Integer, Diff<T>> diff : diffs.entrySet()) {
|
||||
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
|
||||
}
|
||||
|
||||
for (Map.Entry<Integer, T> upsert : upserts.entrySet()) {
|
||||
builder.put(upsert.getKey(), upsert.getValue());
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two maps of objects and is used as base class for different map implementations.
|
||||
*
|
||||
* Implements serialization. How differences are applied is left to subclasses.
|
||||
*
|
||||
* @param <K> the type of map keys
|
||||
* @param <T> the type of map values
|
||||
* @param <M> the map implementation type
|
||||
*/
|
||||
public static abstract class MapDiff<K, T, M> implements Diff<M> {
|
||||
|
||||
protected final List<K> deletes;
|
||||
protected final Map<K, Diff<T>> diffs; // incremental updates
|
||||
protected final Map<K, T> upserts; // additions or full updates
|
||||
protected final KeySerializer<K> keySerializer;
|
||||
protected final ValueSerializer<K, T> valueSerializer;
|
||||
|
||||
protected MapDiff(KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
this.keySerializer = keySerializer;
|
||||
this.valueSerializer = valueSerializer;
|
||||
deletes = new ArrayList<>();
|
||||
diffs = new HashMap<>();
|
||||
adds = new HashMap<>();
|
||||
upserts = new HashMap<>();
|
||||
}
|
||||
|
||||
protected MapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
this.keySerializer = keySerializer;
|
||||
this.valueSerializer = valueSerializer;
|
||||
deletes = new ArrayList<>();
|
||||
diffs = new HashMap<>();
|
||||
upserts = new HashMap<>();
|
||||
int deletesCount = in.readVInt();
|
||||
for (int i = 0; i < deletesCount; i++) {
|
||||
deletes.add(in.readString());
|
||||
deletes.add(keySerializer.readKey(in));
|
||||
}
|
||||
|
||||
int diffsCount = in.readVInt();
|
||||
for (int i = 0; i < diffsCount; i++) {
|
||||
String key = in.readString();
|
||||
Diff<T> diff = reader.readDiffFrom(in, key);
|
||||
K key = keySerializer.readKey(in);
|
||||
Diff<T> diff = valueSerializer.readDiff(in, key);
|
||||
diffs.put(key, diff);
|
||||
}
|
||||
|
||||
int addsCount = in.readVInt();
|
||||
for (int i = 0; i < addsCount; i++) {
|
||||
String key = in.readString();
|
||||
T part = reader.readFrom(in, key);
|
||||
adds.put(key, part);
|
||||
int upsertsCount = in.readVInt();
|
||||
for (int i = 0; i < upsertsCount; i++) {
|
||||
K key = keySerializer.readKey(in);
|
||||
T newValue = valueSerializer.read(in, key);
|
||||
upserts.put(key, newValue);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The keys that, when this diff is applied to a map, should be removed from the map.
|
||||
*
|
||||
* @return the list of keys that are deleted
|
||||
*/
|
||||
public List<K> getDeletes() {
|
||||
return deletes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Map entries that, when this diff is applied to a map, should be
|
||||
* incrementally updated. The incremental update is represented using
|
||||
* the {@link Diff} interface.
|
||||
*
|
||||
* @return the map entries that are incrementally updated
|
||||
*/
|
||||
public Map<K, Diff<T>> getDiffs() {
|
||||
return diffs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Map entries that, when this diff is applied to a map, should be
|
||||
* added to the map or fully replace the previous value.
|
||||
*
|
||||
* @return the map entries that are additions or full updates
|
||||
*/
|
||||
public Map<K, T> getUpserts() {
|
||||
return upserts;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVInt(deletes.size());
|
||||
for (String delete : deletes) {
|
||||
out.writeString(delete);
|
||||
for (K delete : deletes) {
|
||||
keySerializer.writeKey(delete, out);
|
||||
}
|
||||
|
||||
out.writeVInt(diffs.size());
|
||||
for (Map.Entry<String, Diff<T>> entry : diffs.entrySet()) {
|
||||
out.writeString(entry.getKey());
|
||||
entry.getValue().writeTo(out);
|
||||
for (Map.Entry<K, Diff<T>> entry : diffs.entrySet()) {
|
||||
keySerializer.writeKey(entry.getKey(), out);
|
||||
valueSerializer.writeDiff(entry.getValue(), out);
|
||||
}
|
||||
|
||||
out.writeVInt(adds.size());
|
||||
for (Map.Entry<String, T> entry : adds.entrySet()) {
|
||||
out.writeString(entry.getKey());
|
||||
entry.getValue().writeTo(out);
|
||||
out.writeVInt(upserts.size());
|
||||
for (Map.Entry<K, T> entry : upserts.entrySet()) {
|
||||
keySerializer.writeKey(entry.getKey(), out);
|
||||
valueSerializer.write(entry.getValue(), out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Provides read and write operations to serialize keys of map
|
||||
* @param <K> type of key
|
||||
*/
|
||||
public interface KeySerializer<K> {
|
||||
void writeKey(K key, StreamOutput out) throws IOException;
|
||||
K readKey(StreamInput in) throws IOException;
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializes String keys of a map
|
||||
*/
|
||||
private static final class StringKeySerializer implements KeySerializer<String> {
|
||||
private static final StringKeySerializer INSTANCE = new StringKeySerializer();
|
||||
|
||||
@Override
|
||||
public void writeKey(String key, StreamOutput out) throws IOException {
|
||||
out.writeString(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String readKey(StreamInput in) throws IOException {
|
||||
return in.readString();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializes Integer keys of a map as an Int
|
||||
*/
|
||||
private static final class IntKeySerializer implements KeySerializer<Integer> {
|
||||
public static final IntKeySerializer INSTANCE = new IntKeySerializer();
|
||||
|
||||
@Override
|
||||
public void writeKey(Integer key, StreamOutput out) throws IOException {
|
||||
out.writeInt(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Integer readKey(StreamInput in) throws IOException {
|
||||
return in.readInt();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializes Integer keys of a map as a VInt. Requires keys to be positive.
|
||||
*/
|
||||
private static final class VIntKeySerializer implements KeySerializer<Integer> {
|
||||
public static final VIntKeySerializer INSTANCE = new VIntKeySerializer();
|
||||
|
||||
@Override
|
||||
public void writeKey(Integer key, StreamOutput out) throws IOException {
|
||||
if (key < 0) {
|
||||
throw new IllegalArgumentException("Map key [" + key + "] must be positive");
|
||||
}
|
||||
out.writeVInt(key);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Integer readKey(StreamInput in) throws IOException {
|
||||
return in.readVInt();
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Provides read and write operations to serialize map values.
 * Reading of values can be made dependent on the map key.
 *
 * Also provides operations to distinguish whether map values are diffable.
 *
 * Should not be implemented directly; instead extend either
 * {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}.
 *
 * @param <K> key type of the map
 * @param <V> value type of the map
 */
public interface ValueSerializer<K, V> {
|
||||
|
||||
/**
|
||||
* Writes value to stream
|
||||
*/
|
||||
void write(V value, StreamOutput out) throws IOException;
|
||||
|
||||
/**
|
||||
* Reads value from stream. Reading operation can be made dependent on map key.
|
||||
*/
|
||||
V read(StreamInput in, K key) throws IOException;
|
||||
|
||||
/**
|
||||
* Whether this serializer supports diffable values
|
||||
*/
|
||||
boolean supportsDiffableValues();
|
||||
|
||||
/**
|
||||
* Computes diff if this serializer supports diffable values
|
||||
*/
|
||||
Diff<V> diff(V value, V beforePart);
|
||||
|
||||
/**
|
||||
* Writes value as diff to stream if this serializer supports diffable values
|
||||
*/
|
||||
void writeDiff(Diff<V> value, StreamOutput out) throws IOException;
|
||||
|
||||
/**
|
||||
* Reads value as diff from stream if this serializer supports diffable values.
|
||||
* Reading operation can be made dependent on map key.
|
||||
*/
|
||||
Diff<V> readDiff(StreamInput in, K key) throws IOException;
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializer for Diffable map values. Needs to implement read and readDiff methods.
|
||||
*
|
||||
* @param <K> type of map keys
|
||||
* @param <V> type of map values
|
||||
*/
|
||||
public static abstract class DiffableValueSerializer<K, V extends Diffable<V>> implements ValueSerializer<K, V> {
|
||||
private static final DiffableValueSerializer WRITE_ONLY_INSTANCE = new DiffableValueSerializer() {
|
||||
@Override
|
||||
public Object read(StreamInput in, Object key) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<Object> readDiff(StreamInput in, Object key) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
};
|
||||
|
||||
private static <K, V extends Diffable<V>> DiffableValueSerializer<K, V> getWriteOnlyInstance() {
|
||||
return WRITE_ONLY_INSTANCE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean supportsDiffableValues() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<V> diff(V value, V beforePart) {
|
||||
return value.diff(beforePart);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(V value, StreamOutput out) throws IOException {
|
||||
value.writeTo(out);
|
||||
}
|
||||
|
||||
public void writeDiff(Diff<V> value, StreamOutput out) throws IOException {
|
||||
value.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Serializer for non-diffable map values
|
||||
*
|
||||
* @param <K> type of map keys
|
||||
* @param <V> type of map values
|
||||
*/
|
||||
public static abstract class NonDiffableValueSerializer<K, V> implements ValueSerializer<K, V> {
|
||||
@Override
|
||||
public boolean supportsDiffableValues() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<V> diff(V value, V beforePart) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeDiff(Diff<V> value, StreamOutput out) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<V> readDiff(StreamInput in, K key) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
}
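
For non-diffable values only the plain write and read methods need to be supplied;
everything diff-related is stubbed out above. A hypothetical serializer for Long values,
keyed by String, might look like this (not part of this change):

---------------------------------------------------------------------------
// Hypothetical example: Long values written as fixed-length longs; the key is unused.
private static final class LongValueSerializer extends NonDiffableValueSerializer<String, Long> {
    @Override
    public void write(Long value, StreamOutput out) throws IOException {
        out.writeLong(value);
    }

    @Override
    public Long read(StreamInput in, String key) throws IOException {
        return in.readLong();
    }
}
---------------------------------------------------------------------------
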
/**
 * Implementation of ValueSerializer that uses a prototype object for read operations
 *
 * Note: this implementation ignores the key.
 */
public static class DiffablePrototypeValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
|
||||
private final V proto;
|
||||
|
||||
public DiffablePrototypeValueReader(V proto) {
|
||||
this.proto = proto;
|
||||
}
|
||||
|
||||
@Override
|
||||
public V read(StreamInput in, K key) throws IOException {
|
||||
return proto.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<V> readDiff(StreamInput in, K key) throws IOException {
|
||||
return proto.readDiffFrom(in);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Implementation of ValueSerializer that serializes immutable sets of strings
 *
 * @param <K> type of map key
 */
public static class StringSetValueSerializer<K> extends NonDiffableValueSerializer<K, Set<String>> {
|
||||
private static final StringSetValueSerializer INSTANCE = new StringSetValueSerializer();
|
||||
|
||||
public static <K> StringSetValueSerializer<K> getInstance() {
|
||||
return INSTANCE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(Set<String> value, StreamOutput out) throws IOException {
|
||||
out.writeStringArray(value.toArray(new String[value.size()]));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Set<String> read(StreamInput in, K key) throws IOException {
|
||||
return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(in.readStringArray())));
|
||||
}
|
||||
}
|
||||
}
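
Putting the pieces together, a typical round trip mirrors the call sites later in this
commit: compute a diff with a key serializer, write it to a stream, read it back with a
prototype-based value serializer, and apply it to the old map. A minimal sketch, assuming
`before`/`after` maps and a connected stream pair as inputs (imports from
org.elasticsearch.cluster, org.elasticsearch.cluster.metadata,
org.elasticsearch.common.collect and org.elasticsearch.common.io.stream omitted):

---------------------------------------------------------------------------
// Sketch: diff/apply round trip for a string-keyed map of diffable values.
static ImmutableOpenMap<String, IndexMetaData> roundTrip(ImmutableOpenMap<String, IndexMetaData> before,
                                                         ImmutableOpenMap<String, IndexMetaData> after,
                                                         StreamOutput out, StreamInput in) throws IOException {
    // sender side: serialize only the deletes, per-entry diffs and upserts
    Diff<ImmutableOpenMap<String, IndexMetaData>> diff =
            DiffableUtils.diff(before, after, DiffableUtils.getStringKeySerializer());
    diff.writeTo(out);

    // receiver side: read the diff and apply it to the local copy of the old map
    Diff<ImmutableOpenMap<String, IndexMetaData>> received =
            DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData.PROTO);
    return received.apply(before); // reconstructs 'after'
}
---------------------------------------------------------------------------
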
@ -25,7 +25,6 @@ import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
@ -57,7 +56,7 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) {
|
||||
final DiscoveryNodes nodes = state.nodes();
|
||||
if (nodes.masterNode() == null) {
|
||||
logger.warn("can't send mapping refresh for [{}][{}], no master known.", request.index(), Strings.arrayToCommaDelimitedString(request.types()));
|
||||
logger.warn("can't send mapping refresh for [{}], no master known.", request.index());
|
||||
return;
|
||||
}
|
||||
transportService.sendRequest(nodes.masterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
|
||||
@ -67,7 +66,7 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
|
||||
@Override
|
||||
public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception {
|
||||
metaDataMappingService.refreshMapping(request.index(), request.indexUUID(), request.types());
|
||||
metaDataMappingService.refreshMapping(request.index(), request.indexUUID());
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
@ -76,16 +75,14 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
|
||||
private String index;
|
||||
private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
|
||||
private String[] types;
|
||||
private String nodeId;
|
||||
|
||||
public NodeMappingRefreshRequest() {
|
||||
}
|
||||
|
||||
public NodeMappingRefreshRequest(String index, String indexUUID, String[] types, String nodeId) {
|
||||
public NodeMappingRefreshRequest(String index, String indexUUID, String nodeId) {
|
||||
this.index = index;
|
||||
this.indexUUID = indexUUID;
|
||||
this.types = types;
|
||||
this.nodeId = nodeId;
|
||||
}
|
||||
|
||||
@ -107,11 +104,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
return indexUUID;
|
||||
}
|
||||
|
||||
|
||||
public String[] types() {
|
||||
return types;
|
||||
}
|
||||
|
||||
public String nodeId() {
|
||||
return nodeId;
|
||||
}
|
||||
@ -120,7 +112,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(index);
|
||||
out.writeStringArray(types);
|
||||
out.writeString(nodeId);
|
||||
out.writeString(indexUUID);
|
||||
}
|
||||
@ -129,7 +120,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
index = in.readString();
|
||||
types = in.readStringArray();
|
||||
nodeId = in.readString();
|
||||
indexUUID = in.readString();
|
||||
}
|
||||
|
@ -20,9 +20,7 @@
|
||||
package org.elasticsearch.cluster.action.shard;
|
||||
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingService;
|
||||
@ -38,15 +36,12 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.*;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
|
||||
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
|
||||
|
||||
@ -63,9 +58,6 @@ public class ShardStateAction extends AbstractComponent {
|
||||
private final AllocationService allocationService;
|
||||
private final RoutingService routingService;
|
||||
|
||||
private final BlockingQueue<ShardRoutingEntry> startedShardsQueue = ConcurrentCollections.newBlockingQueue();
|
||||
private final BlockingQueue<ShardRoutingEntry> failedShardQueue = ConcurrentCollections.newBlockingQueue();
|
||||
|
||||
@Inject
|
||||
public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService,
|
||||
AllocationService allocationService, RoutingService routingService) {
|
||||
@ -141,104 +133,94 @@ public class ShardStateAction extends AbstractComponent {
|
||||
});
|
||||
}
|
||||
|
||||
private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
|
||||
|
||||
private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
|
||||
logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
|
||||
failedShardQueue.add(shardRoutingEntry);
|
||||
clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", Priority.HIGH, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask(
|
||||
"shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
|
||||
shardRoutingEntry,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH),
|
||||
shardFailedClusterStateHandler,
|
||||
shardFailedClusterStateHandler);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
if (shardRoutingEntry.processed) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<>();
|
||||
failedShardQueue.drainTo(shardRoutingEntries);
|
||||
|
||||
// nothing to process (a previous event has processed it already)
|
||||
if (shardRoutingEntries.isEmpty()) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(shardRoutingEntries.size());
|
||||
|
||||
// mark all entries as processed
|
||||
for (ShardRoutingEntry entry : shardRoutingEntries) {
|
||||
entry.processed = true;
|
||||
shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(entry.shardRouting, entry.message, entry.failure));
|
||||
}
|
||||
|
||||
RoutingAllocation.Result routingResult = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
|
||||
if (!routingResult.changed()) {
|
||||
return currentState;
|
||||
}
|
||||
return ClusterState.builder(currentState).routingResult(routingResult).build();
|
||||
class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
|
||||
@Override
|
||||
public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
|
||||
BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
|
||||
List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
|
||||
for (ShardRoutingEntry task : tasks) {
|
||||
shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
ClusterState maybeUpdatedState = currentState;
|
||||
try {
|
||||
RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
|
||||
if (result.changed()) {
|
||||
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
|
||||
}
|
||||
batchResultBuilder.successes(tasks);
|
||||
} catch (Throwable t) {
|
||||
batchResultBuilder.failures(tasks, t);
|
||||
}
|
||||
return batchResultBuilder.build(maybeUpdatedState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) {
|
||||
logger.trace("unassigned shards after shard failures. scheduling a reroute.");
|
||||
routingService.reroute("unassigned shards after shard failures, scheduling a reroute");
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
}
|
||||
}
|
||||
|
||||
private final ShardStartedClusterStateHandler shardStartedClusterStateHandler =
|
||||
new ShardStartedClusterStateHandler();
|
||||
|
||||
private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
|
||||
logger.debug("received shard started for {}", shardRoutingEntry);
|
||||
// buffer shard started requests, and the state update tasks will simply drain it
|
||||
// this is to optimize the number of "started" events we generate, and batch them
|
||||
// possibly, we can do time based batching as well, but usually, we would want to
|
||||
// process started events as fast as possible, to make shards available
|
||||
startedShardsQueue.add(shardRoutingEntry);
|
||||
|
||||
clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", Priority.URGENT,
|
||||
new ClusterStateUpdateTask() {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
clusterService.submitStateUpdateTask(
|
||||
"shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
|
||||
shardRoutingEntry,
|
||||
ClusterStateTaskConfig.build(Priority.URGENT),
|
||||
shardStartedClusterStateHandler,
|
||||
shardStartedClusterStateHandler);
|
||||
}
|
||||
|
||||
if (shardRoutingEntry.processed) {
|
||||
return currentState;
|
||||
}
|
||||
class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
|
||||
@Override
|
||||
public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
|
||||
BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
|
||||
List<ShardRouting> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
|
||||
for (ShardRoutingEntry task : tasks) {
|
||||
shardRoutingsToBeApplied.add(task.shardRouting);
|
||||
}
|
||||
ClusterState maybeUpdatedState = currentState;
|
||||
try {
|
||||
RoutingAllocation.Result result =
|
||||
allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied, true);
|
||||
if (result.changed()) {
|
||||
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
|
||||
}
|
||||
builder.successes(tasks);
|
||||
} catch (Throwable t) {
|
||||
builder.failures(tasks, t);
|
||||
}
|
||||
|
||||
List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<>();
|
||||
startedShardsQueue.drainTo(shardRoutingEntries);
|
||||
return builder.build(maybeUpdatedState);
|
||||
}
|
||||
|
||||
// nothing to process (a previous event has processed it already)
|
||||
if (shardRoutingEntries.isEmpty()) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
List<ShardRouting> shardRoutingToBeApplied = new ArrayList<>(shardRoutingEntries.size());
|
||||
|
||||
// mark all entries as processed
|
||||
for (ShardRoutingEntry entry : shardRoutingEntries) {
|
||||
entry.processed = true;
|
||||
shardRoutingToBeApplied.add(entry.shardRouting);
|
||||
}
|
||||
|
||||
if (shardRoutingToBeApplied.isEmpty()) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
RoutingAllocation.Result routingResult = allocationService.applyStartedShards(currentState, shardRoutingToBeApplied, true);
|
||||
if (!routingResult.changed()) {
|
||||
return currentState;
|
||||
}
|
||||
return ClusterState.builder(currentState).routingResult(routingResult).build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
}
|
||||
});
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
}
|
||||
}
|
||||
|
||||
private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
|
||||
@ -266,8 +248,6 @@ public class ShardStateAction extends AbstractComponent {
|
||||
String message;
|
||||
Throwable failure;
|
||||
|
||||
volatile boolean processed; // state field, no need to serialize
|
||||
|
||||
public ShardRoutingEntry() {
|
||||
}
|
||||
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.Version;
|
||||
@ -30,6 +31,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.collect.MapBuilder;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
@ -46,10 +48,13 @@ import org.joda.time.DateTimeZone;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.text.ParseException;
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
|
||||
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
|
||||
@ -168,6 +173,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
|
||||
public static final String INDEX_UUID_NA_VALUE = "_na_";
|
||||
|
||||
public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
|
||||
|
||||
private final int numberOfShards;
|
||||
private final int numberOfReplicas;
|
||||
|
||||
@ -184,6 +191,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
|
||||
private final ImmutableOpenMap<String, Custom> customs;
|
||||
|
||||
private final ImmutableOpenIntMap<Set<String>> activeAllocationIds;
|
||||
|
||||
private transient final int totalNumberOfShards;
|
||||
|
||||
private final DiscoveryNodeFilters requireFilters;
|
||||
@ -194,65 +203,29 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
private final Version indexUpgradedVersion;
|
||||
private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
|
||||
|
||||
private IndexMetaData(String index, long version, State state, Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs) {
|
||||
Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
|
||||
if (maybeNumberOfShards == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
|
||||
}
|
||||
int numberOfShards = maybeNumberOfShards;
|
||||
if (numberOfShards <= 0) {
|
||||
throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
|
||||
}
|
||||
private IndexMetaData(String index, long version, State state, int numberOfShards, int numberOfReplicas, Settings settings,
|
||||
ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
|
||||
ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> activeAllocationIds,
|
||||
DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
|
||||
Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion) {
|
||||
|
||||
Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
|
||||
if (maybeNumberOfReplicas == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
|
||||
}
|
||||
int numberOfReplicas = maybeNumberOfReplicas;
|
||||
if (numberOfReplicas < 0) {
|
||||
throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
|
||||
}
|
||||
this.index = index;
|
||||
this.version = version;
|
||||
this.state = state;
|
||||
this.settings = settings;
|
||||
this.mappings = mappings;
|
||||
this.customs = customs;
|
||||
this.numberOfShards = numberOfShards;
|
||||
this.numberOfReplicas = numberOfReplicas;
|
||||
this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1);
|
||||
this.settings = settings;
|
||||
this.mappings = mappings;
|
||||
this.customs = customs;
|
||||
this.aliases = aliases;
|
||||
|
||||
Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
|
||||
if (requireMap.isEmpty()) {
|
||||
requireFilters = null;
|
||||
} else {
|
||||
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
|
||||
}
|
||||
Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
|
||||
if (includeMap.isEmpty()) {
|
||||
includeFilters = null;
|
||||
} else {
|
||||
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
|
||||
}
|
||||
Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
|
||||
if (excludeMap.isEmpty()) {
|
||||
excludeFilters = null;
|
||||
} else {
|
||||
excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
|
||||
}
|
||||
indexCreatedVersion = Version.indexCreated(settings);
|
||||
indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
|
||||
String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
|
||||
if (stringLuceneVersion != null) {
|
||||
try {
|
||||
this.minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
|
||||
} catch (ParseException ex) {
|
||||
throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE +"] setting", ex);
|
||||
}
|
||||
} else {
|
||||
this.minimumCompatibleLuceneVersion = null;
|
||||
}
|
||||
this.activeAllocationIds = activeAllocationIds;
|
||||
this.requireFilters = requireFilters;
|
||||
this.includeFilters = includeFilters;
|
||||
this.excludeFilters = excludeFilters;
|
||||
this.indexCreatedVersion = indexCreatedVersion;
|
||||
this.indexUpgradedVersion = indexUpgradedVersion;
|
||||
this.minimumCompatibleLuceneVersion = minimumCompatibleLuceneVersion;
|
||||
}
|
||||
|
||||
public String getIndex() {
|
||||
@ -364,6 +337,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
return (T) customs.get(type);
|
||||
}
|
||||
|
||||
public ImmutableOpenIntMap<Set<String>> getActiveAllocationIds() {
|
||||
return activeAllocationIds;
|
||||
}
|
||||
|
||||
public Set<String> activeAllocationIds(int shardId) {
|
||||
assert shardId >= 0 && shardId < numberOfShards;
|
||||
return activeAllocationIds.get(shardId);
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public DiscoveryNodeFilters requireFilters() {
|
||||
return requireFilters;
|
||||
@ -408,6 +390,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
if (!customs.equals(that.customs)) {
|
||||
return false;
|
||||
}
|
||||
if (!activeAllocationIds.equals(that.activeAllocationIds)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -418,6 +403,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
result = 31 * result + aliases.hashCode();
|
||||
result = 31 * result + settings.hashCode();
|
||||
result = 31 * result + mappings.hashCode();
|
||||
result = 31 * result + activeAllocationIds.hashCode();
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -450,16 +436,19 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
private final Settings settings;
|
||||
private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
|
||||
private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
|
||||
private Diff<ImmutableOpenMap<String, Custom>> customs;
|
||||
private final Diff<ImmutableOpenMap<String, Custom>> customs;
|
||||
private final Diff<ImmutableOpenIntMap<Set<String>>> activeAllocationIds;
|
||||
|
||||
public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
|
||||
index = after.index;
|
||||
version = after.version;
|
||||
state = after.state;
|
||||
settings = after.settings;
|
||||
mappings = DiffableUtils.diff(before.mappings, after.mappings);
|
||||
aliases = DiffableUtils.diff(before.aliases, after.aliases);
|
||||
customs = DiffableUtils.diff(before.customs, after.customs);
|
||||
mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
|
||||
aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
|
||||
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
|
||||
activeAllocationIds = DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds,
|
||||
DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
|
||||
}
|
||||
|
||||
public IndexMetaDataDiff(StreamInput in) throws IOException {
|
||||
@ -467,19 +456,22 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
version = in.readLong();
|
||||
state = State.fromId(in.readByte());
|
||||
settings = Settings.readSettingsFromStream(in);
|
||||
mappings = DiffableUtils.readImmutableOpenMapDiff(in, MappingMetaData.PROTO);
|
||||
aliases = DiffableUtils.readImmutableOpenMapDiff(in, AliasMetaData.PROTO);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, new DiffableUtils.KeyedReader<Custom>() {
|
||||
mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
|
||||
aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
|
||||
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
|
||||
@Override
|
||||
public Custom readFrom(StreamInput in, String key) throws IOException {
|
||||
public Custom read(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
|
||||
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readDiffFrom(in);
|
||||
}
|
||||
});
|
||||
activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
|
||||
DiffableUtils.StringSetValueSerializer.getInstance());
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -491,6 +483,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
mappings.writeTo(out);
|
||||
aliases.writeTo(out);
|
||||
customs.writeTo(out);
|
||||
activeAllocationIds.writeTo(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -502,6 +495,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
builder.mappings.putAll(mappings.apply(part.mappings));
|
||||
builder.aliases.putAll(aliases.apply(part.aliases));
|
||||
builder.customs.putAll(customs.apply(part.customs));
|
||||
builder.activeAllocationIds.putAll(activeAllocationIds.apply(part.activeAllocationIds));
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
@ -528,6 +522,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
|
||||
builder.putCustom(type, customIndexMetaData);
|
||||
}
|
||||
int activeAllocationIdsSize = in.readVInt();
|
||||
for (int i = 0; i < activeAllocationIdsSize; i++) {
|
||||
int key = in.readVInt();
|
||||
Set<String> allocationIds = DiffableUtils.StringSetValueSerializer.getInstance().read(in, key);
|
||||
builder.putActiveAllocationIds(key, allocationIds);
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
@ -550,6 +550,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
out.writeString(cursor.key);
|
||||
cursor.value.writeTo(out);
|
||||
}
|
||||
out.writeVInt(activeAllocationIds.size());
|
||||
for (IntObjectCursor<Set<String>> cursor : activeAllocationIds) {
|
||||
out.writeVInt(cursor.key);
|
||||
DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.value, out);
|
||||
}
|
||||
}
|
||||
|
||||
public static Builder builder(String index) {
|
||||
@ -569,12 +574,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
|
||||
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
|
||||
private final ImmutableOpenMap.Builder<String, Custom> customs;
|
||||
private final ImmutableOpenIntMap.Builder<Set<String>> activeAllocationIds;
|
||||
|
||||
public Builder(String index) {
|
||||
this.index = index;
|
||||
this.mappings = ImmutableOpenMap.builder();
|
||||
this.aliases = ImmutableOpenMap.builder();
|
||||
this.customs = ImmutableOpenMap.builder();
|
||||
this.activeAllocationIds = ImmutableOpenIntMap.builder();
|
||||
}
|
||||
|
||||
public Builder(IndexMetaData indexMetaData) {
|
||||
@ -585,6 +592,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
|
||||
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
|
||||
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
|
||||
this.activeAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.activeAllocationIds);
|
||||
}
|
||||
|
||||
public String index() {
|
||||
@ -693,6 +701,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
return this.customs.get(type);
|
||||
}
|
||||
|
||||
public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
    activeAllocationIds.put(shardId, new HashSet<>(allocationIds));
    return this;
}

public Set<String> getActiveAllocationIds(int shardId) {
|
||||
return activeAllocationIds.get(shardId);
|
||||
}
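
As a usage sketch (hypothetical values; `indexSettings` is assumed to already contain the
settings that build() requires, such as the number of shards and replicas):

---------------------------------------------------------------------------
// Sketch: record the active allocation ids of shard 0 and read them back.
IndexMetaData indexMetaData = IndexMetaData.builder("my-index")
        .settings(indexSettings)
        .putActiveAllocationIds(0, Collections.singleton("alloc-id-1"))
        .build();
Set<String> shard0Ids = indexMetaData.activeAllocationIds(0); // ["alloc-id-1"]
---------------------------------------------------------------------------
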
public long version() {
|
||||
return this.version;
|
||||
}
|
||||
@ -714,7 +731,72 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
}
|
||||
}
|
||||
|
||||
return new IndexMetaData(index, version, state, tmpSettings, mappings.build(), tmpAliases.build(), customs.build());
|
||||
Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
|
||||
if (maybeNumberOfShards == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
|
||||
}
|
||||
int numberOfShards = maybeNumberOfShards;
|
||||
if (numberOfShards <= 0) {
|
||||
throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
|
||||
}
|
||||
|
||||
Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
|
||||
if (maybeNumberOfReplicas == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
|
||||
}
|
||||
int numberOfReplicas = maybeNumberOfReplicas;
|
||||
if (numberOfReplicas < 0) {
|
||||
throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
|
||||
}
|
||||
|
||||
// fill missing slots in activeAllocationIds with empty set if needed and make all entries immutable
|
||||
ImmutableOpenIntMap.Builder<Set<String>> filledActiveAllocationIds = ImmutableOpenIntMap.builder();
|
||||
for (int i = 0; i < numberOfShards; i++) {
|
||||
if (activeAllocationIds.containsKey(i)) {
|
||||
filledActiveAllocationIds.put(i, Collections.unmodifiableSet(new HashSet<>(activeAllocationIds.get(i))));
|
||||
} else {
|
||||
filledActiveAllocationIds.put(i, Collections.emptySet());
|
||||
}
|
||||
}
|
||||
|
||||
Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
|
||||
final DiscoveryNodeFilters requireFilters;
|
||||
if (requireMap.isEmpty()) {
|
||||
requireFilters = null;
|
||||
} else {
|
||||
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
|
||||
}
|
||||
Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
|
||||
final DiscoveryNodeFilters includeFilters;
|
||||
if (includeMap.isEmpty()) {
|
||||
includeFilters = null;
|
||||
} else {
|
||||
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
|
||||
}
|
||||
Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
|
||||
final DiscoveryNodeFilters excludeFilters;
|
||||
if (excludeMap.isEmpty()) {
|
||||
excludeFilters = null;
|
||||
} else {
|
||||
excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
|
||||
}
|
||||
Version indexCreatedVersion = Version.indexCreated(settings);
|
||||
Version indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
|
||||
String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
|
||||
final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
|
||||
if (stringLuceneVersion != null) {
|
||||
try {
|
||||
minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
|
||||
} catch (ParseException ex) {
|
||||
throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE +"] setting", ex);
|
||||
}
|
||||
} else {
|
||||
minimumCompatibleLuceneVersion = null;
|
||||
}
|
||||
|
||||
return new IndexMetaData(index, version, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
|
||||
tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
|
||||
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
|
||||
}
|
||||
|
||||
public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
|
||||
@ -757,6 +839,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
builder.startObject(KEY_ACTIVE_ALLOCATIONS);
|
||||
for (IntObjectCursor<Set<String>> cursor : indexMetaData.activeAllocationIds) {
|
||||
builder.startArray(String.valueOf(cursor.key));
|
||||
for (String allocationId : cursor.value) {
|
||||
builder.value(allocationId);
|
||||
}
|
||||
builder.endArray();
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
builder.endObject();
|
||||
}
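
With the block above, the serialized index metadata gains an "active_allocations" section
keyed by shard id, one array of allocation ids per shard; illustratively (the ids are
placeholders):

---------------------------------------------------------------------------
"active_allocations" : {
  "0" : [ "allocation-id-a" ],
  "1" : [ "allocation-id-b", "allocation-id-c" ]
}
---------------------------------------------------------------------------
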
@ -792,6 +883,21 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
||||
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
|
||||
builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
|
||||
}
|
||||
} else if (KEY_ACTIVE_ALLOCATIONS.equals(currentFieldName)) {
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.START_ARRAY) {
|
||||
String shardId = currentFieldName;
|
||||
Set<String> allocationIds = new HashSet<>();
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
allocationIds.add(parser.text());
|
||||
}
|
||||
}
|
||||
builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// check if its a custom index metadata
|
||||
Custom proto = lookupPrototype(currentFieldName);
|
||||
|
@ -27,7 +27,6 @@ import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.cluster.Diff;
|
||||
import org.elasticsearch.cluster.Diffable;
|
||||
import org.elasticsearch.cluster.DiffableUtils;
|
||||
import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
|
||||
import org.elasticsearch.cluster.InternalClusterInfoService;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
@ -41,6 +40,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.loader.SettingsLoader;
|
||||
@ -54,7 +54,6 @@ import org.elasticsearch.discovery.DiscoverySettings;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.store.IndexStoreConfig;
|
||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.indices.store.IndicesStore;
|
||||
import org.elasticsearch.indices.ttl.IndicesTTLService;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
@ -640,9 +639,9 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
||||
version = after.version;
|
||||
transientSettings = after.transientSettings;
|
||||
persistentSettings = after.persistentSettings;
|
||||
indices = DiffableUtils.diff(before.indices, after.indices);
|
||||
templates = DiffableUtils.diff(before.templates, after.templates);
|
||||
customs = DiffableUtils.diff(before.customs, after.customs);
|
||||
indices = DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer());
|
||||
templates = DiffableUtils.diff(before.templates, after.templates, DiffableUtils.getStringKeySerializer());
|
||||
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
|
||||
}
|
||||
|
||||
public MetaDataDiff(StreamInput in) throws IOException {
|
||||
@ -650,16 +649,17 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
||||
version = in.readLong();
|
||||
transientSettings = Settings.readSettingsFromStream(in);
|
||||
persistentSettings = Settings.readSettingsFromStream(in);
|
||||
indices = DiffableUtils.readImmutableOpenMapDiff(in, IndexMetaData.PROTO);
|
||||
templates = DiffableUtils.readImmutableOpenMapDiff(in, IndexTemplateMetaData.PROTO);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() {
|
||||
indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData.PROTO);
|
||||
templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData.PROTO);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
|
||||
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
|
||||
@Override
|
||||
public Custom readFrom(StreamInput in, String key) throws IOException {
|
||||
public Custom read(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
|
||||
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readDiffFrom(in);
|
||||
}
|
||||
});
|
||||
@ -748,8 +748,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
||||
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
|
||||
RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE,
|
||||
RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE,
|
||||
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC,
|
||||
RecoverySettings.INDICES_RECOVERY_MAX_SIZE_PER_SEC));
|
||||
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC));
|
||||
|
||||
|
||||
/** All known time cluster settings. */
|
||||
@ -1029,12 +1028,18 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
||||
|
||||
for (ObjectObjectCursor<String, AliasMetaData> aliasCursor : indexMetaData.getAliases()) {
|
||||
AliasMetaData aliasMetaData = aliasCursor.value;
|
||||
AliasOrIndex.Alias aliasOrIndex = (AliasOrIndex.Alias) aliasAndIndexLookup.get(aliasMetaData.getAlias());
|
||||
AliasOrIndex aliasOrIndex = aliasAndIndexLookup.get(aliasMetaData.getAlias());
|
||||
if (aliasOrIndex == null) {
|
||||
aliasOrIndex = new AliasOrIndex.Alias(aliasMetaData, indexMetaData);
|
||||
aliasAndIndexLookup.put(aliasMetaData.getAlias(), aliasOrIndex);
|
||||
} else if (aliasOrIndex instanceof AliasOrIndex.Alias) {
|
||||
AliasOrIndex.Alias alias = (AliasOrIndex.Alias) aliasOrIndex;
|
||||
alias.addIndex(indexMetaData);
|
||||
} else if (aliasOrIndex instanceof AliasOrIndex.Index) {
|
||||
AliasOrIndex.Index index = (AliasOrIndex.Index) aliasOrIndex;
|
||||
throw new IllegalStateException("index and alias names need to be unique, but alias [" + aliasMetaData.getAlias() + "] and index [" + index.getIndex().getIndex() + "] have the same name");
|
||||
} else {
|
||||
aliasOrIndex.addIndex(indexMetaData);
|
||||
throw new IllegalStateException("unexpected alias [" + aliasMetaData.getAlias() + "][" + aliasOrIndex + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -170,12 +170,12 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
||||
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
|
||||
request.settings(updatedSettingsBuilder.build());
|
||||
|
||||
clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
}
|
||||
clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]",
|
||||
new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
@ -299,7 +299,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
||||
// Set up everything, now locally create the index to see that things are ok, and apply
|
||||
final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build();
|
||||
// create the index here (on the master) to validate it can be created, as well as adding the mapping
|
||||
indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.EMPTY_LIST);
|
||||
indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
|
||||
indexCreated = true;
|
||||
// now add the mappings
|
||||
IndexService indexService = indicesService.indexServiceSafe(request.index());
|
||||
|
@ -39,7 +39,6 @@ import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Locale;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
@ -71,7 +70,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
|
||||
Collection<String> indices = Arrays.asList(request.indices);
|
||||
final DeleteIndexListener listener = new DeleteIndexListener(userListener);
|
||||
|
||||
clusterService.submitStateUpdateTask("delete-index " + indices, Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
|
@ -62,7 +62,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
|
||||
}
|
||||
|
||||
public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
clusterService.submitStateUpdateTask("index-aliases", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("index-aliases", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
@ -99,7 +99,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
|
||||
if (indexService == null) {
|
||||
// temporarily create the index and add mappings so we can parse the filter
|
||||
try {
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
|
||||
}
|
||||
|
@ -76,7 +76,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
|
||||
}
|
||||
|
||||
final String indicesAsString = Arrays.toString(request.indices());
|
||||
clusterService.submitStateUpdateTask("close-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("close-indices " + indicesAsString, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
@ -140,7 +140,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
|
||||
}
|
||||
|
||||
final String indicesAsString = Arrays.toString(request.indices());
|
||||
clusterService.submitStateUpdateTask("open-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("open-indices " + indicesAsString, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
|
@ -56,7 +56,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
|
||||
}
|
||||
|
||||
public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {
|
||||
clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
@ -143,7 +143,8 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
|
||||
}
|
||||
final IndexTemplateMetaData template = templateBuilder.build();
|
||||
|
||||
clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]",
|
||||
new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
@ -216,6 +217,9 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
|
||||
for (Alias alias : request.aliases) {
|
||||
// we validate the alias only partially, as we don't yet know which index it will be applied to
|
||||
aliasValidator.validateAliasStandalone(alias);
|
||||
if (request.template.equals(alias.name())) {
|
||||
throw new IllegalArgumentException("Alias [" + alias.name() + "] cannot be the same as the template pattern [" + request.template + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -218,8 +218,8 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
||||
try {
|
||||
// We cannot instantiate real analysis server at this point because the node might not have
|
||||
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it
|
||||
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.EMPTY_LIST);
|
||||
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.EMPTY_MAP);
|
||||
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList());
|
||||
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
|
||||
|
||||
try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
|
||||
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry)) {
|
||||
@ -256,7 +256,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
||||
};
|
||||
|
||||
public FakeAnalysisService(IndexSettings indexSettings) {
|
||||
super(indexSettings, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
|
||||
super(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -22,28 +22,27 @@ package org.elasticsearch.cluster.metadata;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.NodeServicesProvider;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.MergeMappingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.InvalidTypeNameException;
|
||||
import org.elasticsearch.percolator.PercolatorService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
/**
|
||||
* Service responsible for submitting mapping changes
|
||||
@ -53,13 +52,11 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
private final ClusterService clusterService;
|
||||
private final IndicesService indicesService;
|
||||
|
||||
// the mutex protect all the refreshOrUpdate variables!
|
||||
private final Object refreshOrUpdateMutex = new Object();
|
||||
private final List<MappingTask> refreshOrUpdateQueue = new ArrayList<>();
|
||||
private long refreshOrUpdateInsertOrder;
|
||||
private long refreshOrUpdateProcessedInsertOrder;
|
||||
final ClusterStateTaskExecutor<RefreshTask> refreshExecutor = new RefreshTaskExecutor();
|
||||
final ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> putMappingExecutor = new PutMappingExecutor();
|
||||
private final NodeServicesProvider nodeServicesProvider;
|
||||
|
||||
|
||||
@Inject
|
||||
public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) {
|
||||
super(settings);
|
||||
@ -68,89 +65,44 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
this.nodeServicesProvider = nodeServicesProvider;
|
||||
}
|
||||
|
||||
static class MappingTask {
|
||||
static class RefreshTask {
|
||||
final String index;
|
||||
final String indexUUID;
|
||||
|
||||
MappingTask(String index, final String indexUUID) {
|
||||
RefreshTask(String index, final String indexUUID) {
|
||||
this.index = index;
|
||||
this.indexUUID = indexUUID;
|
||||
}
|
||||
}
|
||||
|
||||
static class RefreshTask extends MappingTask {
|
||||
final String[] types;
|
||||
|
||||
RefreshTask(String index, final String indexUUID, String[] types) {
|
||||
super(index, indexUUID);
|
||||
this.types = types;
|
||||
}
|
||||
}
|
||||
|
||||
static class UpdateTask extends MappingTask {
|
||||
final String type;
|
||||
final CompressedXContent mappingSource;
|
||||
final String nodeId; // null for unknown
|
||||
final ActionListener<ClusterStateUpdateResponse> listener;
|
||||
|
||||
UpdateTask(String index, String indexUUID, String type, CompressedXContent mappingSource, String nodeId, ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
super(index, indexUUID);
|
||||
this.type = type;
|
||||
this.mappingSource = mappingSource;
|
||||
this.nodeId = nodeId;
|
||||
this.listener = listener;
|
||||
class RefreshTaskExecutor implements ClusterStateTaskExecutor<RefreshTask> {
|
||||
@Override
|
||||
public BatchResult<RefreshTask> execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception {
|
||||
ClusterState newClusterState = executeRefresh(currentState, tasks);
|
||||
return BatchResult.<RefreshTask>builder().successes(tasks).build(newClusterState);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Batch method to apply all the queued refresh or update operations. The idea is to try and batch as much
|
||||
* Batch method to apply all the queued refresh operations. The idea is to try and batch as much
|
||||
* as possible so we won't create the same index all the time for example for the updates on the same mapping
|
||||
* and generate a single cluster change event out of all of those.
|
||||
*/
|
||||
Tuple<ClusterState, List<MappingTask>> executeRefreshOrUpdate(final ClusterState currentState, final long insertionOrder) throws Exception {
|
||||
final List<MappingTask> allTasks = new ArrayList<>();
|
||||
|
||||
synchronized (refreshOrUpdateMutex) {
|
||||
if (refreshOrUpdateQueue.isEmpty()) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
}
|
||||
|
||||
// we already processed this task in a bulk manner in a previous cluster event, simply ignore
|
||||
// it so we will let other tasks get in and processed ones, we will handle the queued ones
|
||||
// later on in a subsequent cluster state event
|
||||
if (insertionOrder < refreshOrUpdateProcessedInsertOrder) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
}
|
||||
|
||||
allTasks.addAll(refreshOrUpdateQueue);
|
||||
refreshOrUpdateQueue.clear();
|
||||
|
||||
refreshOrUpdateProcessedInsertOrder = refreshOrUpdateInsertOrder;
|
||||
}
|
||||
|
||||
if (allTasks.isEmpty()) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
}
|
||||
|
||||
ClusterState executeRefresh(final ClusterState currentState, final List<RefreshTask> allTasks) throws Exception {
|
||||
// break down to tasks per index, so we can optimize the on demand index service creation
|
||||
// to only happen for the duration of a single index processing of its respective events
|
||||
Map<String, List<MappingTask>> tasksPerIndex = new HashMap<>();
|
||||
for (MappingTask task : allTasks) {
|
||||
Map<String, List<RefreshTask>> tasksPerIndex = new HashMap<>();
|
||||
for (RefreshTask task : allTasks) {
|
||||
if (task.index == null) {
|
||||
logger.debug("ignoring a mapping task of type [{}] with a null index.", task);
|
||||
}
|
||||
List<MappingTask> indexTasks = tasksPerIndex.get(task.index);
|
||||
if (indexTasks == null) {
|
||||
indexTasks = new ArrayList<>();
|
||||
tasksPerIndex.put(task.index, indexTasks);
|
||||
}
|
||||
indexTasks.add(task);
|
||||
tasksPerIndex.computeIfAbsent(task.index, k -> new ArrayList<>()).add(task);
|
||||
}
|
||||
|
||||
boolean dirty = false;
|
||||
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
|
||||
|
||||
for (Map.Entry<String, List<MappingTask>> entry : tasksPerIndex.entrySet()) {
|
||||
for (Map.Entry<String, List<RefreshTask>> entry : tasksPerIndex.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
IndexMetaData indexMetaData = mdBuilder.get(index);
|
||||
if (indexMetaData == null) {
|
||||
@ -160,14 +112,17 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
}
|
||||
// the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep
|
||||
// the latest (based on order) update mapping one per node
|
||||
List<MappingTask> allIndexTasks = entry.getValue();
|
||||
List<MappingTask> tasks = new ArrayList<>();
|
||||
for (MappingTask task : allIndexTasks) {
|
||||
if (!indexMetaData.isSameUUID(task.indexUUID)) {
|
||||
List<RefreshTask> allIndexTasks = entry.getValue();
|
||||
boolean hasTaskWithRightUUID = false;
|
||||
for (RefreshTask task : allIndexTasks) {
|
||||
if (indexMetaData.isSameUUID(task.indexUUID)) {
|
||||
hasTaskWithRightUUID = true;
|
||||
} else {
|
||||
logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
|
||||
continue;
|
||||
}
|
||||
tasks.add(task);
|
||||
}
|
||||
if (hasTaskWithRightUUID == false) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// construct the actual index if needed, and make sure the relevant mappings are there
|
||||
@ -175,28 +130,17 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
// we need to create the index here, and add the current mapping to it, so we can merge
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
|
||||
removeIndex = true;
|
||||
Set<String> typesToIntroduce = new HashSet<>();
|
||||
for (MappingTask task : tasks) {
|
||||
if (task instanceof UpdateTask) {
|
||||
typesToIntroduce.add(((UpdateTask) task).type);
|
||||
} else if (task instanceof RefreshTask) {
|
||||
Collections.addAll(typesToIntroduce, ((RefreshTask) task).types);
|
||||
}
|
||||
}
|
||||
for (String type : typesToIntroduce) {
|
||||
// only add the current relevant mapping (if exists)
|
||||
if (indexMetaData.getMappings().containsKey(type)) {
|
||||
// don't apply the default mapping, it has been applied when the mapping was created
|
||||
indexService.mapperService().merge(type, indexMetaData.getMappings().get(type).source(), false, true);
|
||||
}
|
||||
for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
|
||||
// don't apply the default mapping, it has been applied when the mapping was created
|
||||
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), false, true);
|
||||
}
|
||||
}
|
||||
|
||||
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
|
||||
try {
|
||||
boolean indexDirty = processIndexMappingTasks(tasks, indexService, builder);
|
||||
boolean indexDirty = refreshIndexMapping(indexService, builder);
|
||||
if (indexDirty) {
|
||||
mdBuilder.put(builder);
|
||||
dirty = true;
|
||||
@ -209,81 +153,33 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
}
|
||||
|
||||
if (!dirty) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
return currentState;
|
||||
}
|
||||
return Tuple.tuple(ClusterState.builder(currentState).metaData(mdBuilder).build(), allTasks);
|
||||
return ClusterState.builder(currentState).metaData(mdBuilder).build();
|
||||
}
|
||||
|
||||
private boolean processIndexMappingTasks(List<MappingTask> tasks, IndexService indexService, IndexMetaData.Builder builder) {
|
||||
private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Builder builder) {
|
||||
boolean dirty = false;
|
||||
String index = indexService.index().name();
|
||||
// keep track of what we already refreshed, no need to refresh it again...
|
||||
Set<String> processedRefreshes = new HashSet<>();
|
||||
for (MappingTask task : tasks) {
|
||||
if (task instanceof RefreshTask) {
|
||||
RefreshTask refreshTask = (RefreshTask) task;
|
||||
try {
|
||||
List<String> updatedTypes = new ArrayList<>();
|
||||
for (String type : refreshTask.types) {
|
||||
if (processedRefreshes.contains(type)) {
|
||||
continue;
|
||||
}
|
||||
DocumentMapper mapper = indexService.mapperService().documentMapper(type);
|
||||
if (mapper == null) {
|
||||
continue;
|
||||
}
|
||||
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
|
||||
updatedTypes.add(type);
|
||||
builder.putMapping(new MappingMetaData(mapper));
|
||||
}
|
||||
processedRefreshes.add(type);
|
||||
}
|
||||
|
||||
if (updatedTypes.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
|
||||
dirty = true;
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types);
|
||||
try {
|
||||
List<String> updatedTypes = new ArrayList<>();
|
||||
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
|
||||
final String type = mapper.type();
|
||||
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
|
||||
updatedTypes.add(type);
|
||||
}
|
||||
} else if (task instanceof UpdateTask) {
|
||||
UpdateTask updateTask = (UpdateTask) task;
|
||||
try {
|
||||
String type = updateTask.type;
|
||||
CompressedXContent mappingSource = updateTask.mappingSource;
|
||||
|
||||
MappingMetaData mappingMetaData = builder.mapping(type);
|
||||
if (mappingMetaData != null && mappingMetaData.source().equals(mappingSource)) {
|
||||
logger.debug("[{}] update_mapping [{}] ignoring mapping update task as its source is equal to ours", index, updateTask.type);
|
||||
continue;
|
||||
}
|
||||
|
||||
DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource, false, true);
|
||||
processedRefreshes.add(type);
|
||||
|
||||
// if we end up with the same mapping as the original one, ignore
|
||||
if (mappingMetaData != null && mappingMetaData.source().equals(updatedMapper.mappingSource())) {
|
||||
logger.debug("[{}] update_mapping [{}] ignoring mapping update task as it results in the same source as what we have", index, updateTask.type);
|
||||
continue;
|
||||
}
|
||||
|
||||
// build the updated mapping source
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] update_mapping [{}] (dynamic) with source [{}]", index, type, updatedMapper.mappingSource());
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] update_mapping [{}] (dynamic)", index, type);
|
||||
}
|
||||
|
||||
builder.putMapping(new MappingMetaData(updatedMapper));
|
||||
dirty = true;
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to update-mapping in cluster state, type [{}]", index, updateTask.type);
|
||||
}
|
||||
} else {
|
||||
logger.warn("illegal state, got wrong mapping task type [{}]", task);
|
||||
}
|
||||
|
||||
// if a single type is not up-to-date, re-send everything
|
||||
if (updatedTypes.isEmpty() == false) {
|
||||
logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes);
|
||||
dirty = true;
|
||||
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
|
||||
builder.putMapping(new MappingMetaData(mapper));
|
||||
}
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to refresh-mapping in cluster state", t, index);
|
||||
}
|
||||
return dirty;
|
||||
}
|
||||
@ -291,198 +187,198 @@ public class MetaDataMappingService extends AbstractComponent {
|
||||
/**
|
||||
* Refreshes mappings if they are not the same between original and parsed version
|
||||
*/
|
||||
public void refreshMapping(final String index, final String indexUUID, final String... types) {
|
||||
final long insertOrder;
|
||||
synchronized (refreshOrUpdateMutex) {
|
||||
insertOrder = ++refreshOrUpdateInsertOrder;
|
||||
refreshOrUpdateQueue.add(new RefreshTask(index, indexUUID, types));
|
||||
}
|
||||
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", Priority.HIGH, new ClusterStateUpdateTask() {
|
||||
private volatile List<MappingTask> allTasks;
|
||||
public void refreshMapping(final String index, final String indexUUID) {
|
||||
final RefreshTask refreshTask = new RefreshTask(index, indexUUID);
|
||||
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "]",
|
||||
refreshTask,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH),
|
||||
refreshExecutor,
|
||||
(source, t) -> logger.warn("failure during [{}]", t, source)
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.warn("failure during [{}]", t, source);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
Tuple<ClusterState, List<MappingTask>> tuple = executeRefreshOrUpdate(currentState, insertOrder);
|
||||
this.allTasks = tuple.v2();
|
||||
return tuple.v1();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
if (allTasks == null) {
|
||||
return;
|
||||
class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
|
||||
@Override
|
||||
public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
|
||||
Set<String> indicesToClose = new HashSet<>();
|
||||
BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
|
||||
try {
|
||||
// precreate incoming indices;
|
||||
for (PutMappingClusterStateUpdateRequest request : tasks) {
|
||||
// failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up
|
||||
for (String index : request.indices()) {
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
if (indexMetaData != null && indicesService.hasIndex(index) == false) {
|
||||
// if we don't have the index, we will throw exceptions later;
|
||||
indicesToClose.add(index);
|
||||
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
|
||||
// add mappings for all types, we need them for cross-type validation
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
|
||||
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for (Object task : allTasks) {
|
||||
if (task instanceof UpdateTask) {
|
||||
UpdateTask uTask = (UpdateTask) task;
|
||||
ClusterStateUpdateResponse response = new ClusterStateUpdateResponse(true);
|
||||
uTask.listener.onResponse(response);
|
||||
for (PutMappingClusterStateUpdateRequest request : tasks) {
|
||||
try {
|
||||
currentState = applyRequest(currentState, request);
|
||||
builder.success(request);
|
||||
} catch (Throwable t) {
|
||||
builder.failure(request, t);
|
||||
}
|
||||
}
|
||||
|
||||
return builder.build(currentState);
|
||||
} finally {
|
||||
for (String index : indicesToClose) {
|
||||
indicesService.removeIndex(index, "created for mapping processing");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
|
||||
Map<String, DocumentMapper> newMappers = new HashMap<>();
|
||||
Map<String, DocumentMapper> existingMappers = new HashMap<>();
|
||||
for (String index : request.indices()) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(index);
|
||||
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
|
||||
DocumentMapper newMapper;
|
||||
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
|
||||
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
|
||||
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
|
||||
} else {
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
|
||||
if (existingMapper != null) {
|
||||
// first, simulate
|
||||
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
|
||||
// if we have conflicts, throw an exception
|
||||
if (mergeResult.hasConflicts()) {
|
||||
throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}");
|
||||
}
|
||||
} else {
|
||||
// TODO: can we find a better place for this validation?
|
||||
// The reason this validation is here is that the mapper service doesn't learn about
|
||||
// new types all at once , which can create a false error.
|
||||
|
||||
// For example in MapperService we can't distinguish between a create index api call
|
||||
// and a put mapping api call, so we don't know which type did exist before.
|
||||
// Also the order of the mappings may be backwards.
|
||||
if (newMapper.parentFieldMapper().active()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
|
||||
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
|
||||
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
newMappers.put(index, newMapper);
|
||||
if (existingMapper != null) {
|
||||
existingMappers.put(index, existingMapper);
|
||||
}
|
||||
}
|
||||
|
||||
String mappingType = request.type();
|
||||
if (mappingType == null) {
|
||||
mappingType = newMappers.values().iterator().next().type();
|
||||
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
|
||||
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
|
||||
}
|
||||
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
|
||||
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
|
||||
}
|
||||
final Map<String, MappingMetaData> mappings = new HashMap<>();
|
||||
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
// do the actual merge here on the master, and update the mapping source
|
||||
DocumentMapper newMapper = entry.getValue();
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
CompressedXContent existingSource = null;
|
||||
if (existingMappers.containsKey(entry.getKey())) {
|
||||
existingSource = existingMappers.get(entry.getKey()).mappingSource();
|
||||
}
|
||||
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
|
||||
CompressedXContent updatedSource = mergedMapper.mappingSource();
|
||||
|
||||
if (existingSource != null) {
|
||||
if (existingSource.equals(updatedSource)) {
|
||||
// same source, no changes, ignore it
|
||||
} else {
|
||||
// use the merged mapping source
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
|
||||
}
|
||||
|
||||
}
|
||||
} else {
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
if (mappings.isEmpty()) {
|
||||
// no changes, return
|
||||
return currentState;
|
||||
}
|
||||
MetaData.Builder builder = MetaData.builder(currentState.metaData());
|
||||
for (String indexName : request.indices()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
|
||||
if (indexMetaData == null) {
|
||||
throw new IndexNotFoundException(indexName);
|
||||
}
|
||||
MappingMetaData mappingMd = mappings.get(indexName);
|
||||
if (mappingMd != null) {
|
||||
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
|
||||
}
|
||||
}
|
||||
|
||||
return ClusterState.builder(currentState).metaData(builder).build();
|
||||
}
|
||||
}
|
||||
|
||||
public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]",
|
||||
request,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH, request.masterNodeTimeout()),
|
||||
putMappingExecutor,
|
||||
new AckedClusterStateTaskListener() {
|
||||
|
||||
clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(final ClusterState currentState) throws Exception {
|
||||
List<String> indicesToClose = new ArrayList<>();
|
||||
try {
|
||||
for (String index : request.indices()) {
|
||||
if (!currentState.metaData().hasIndex(index)) {
|
||||
throw new IndexNotFoundException(index);
|
||||
}
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
listener.onFailure(t);
|
||||
}
|
||||
|
||||
// pre create indices here and add mappings to them so we can merge the mappings here if needed
|
||||
for (String index : request.indices()) {
|
||||
if (indicesService.hasIndex(index)) {
|
||||
continue;
|
||||
}
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
indicesToClose.add(indexMetaData.getIndex());
|
||||
// make sure to add custom default mapping if exists
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
|
||||
}
|
||||
// only add the current relevant mapping (if exists)
|
||||
if (indexMetaData.getMappings().containsKey(request.type())) {
|
||||
indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
|
||||
}
|
||||
@Override
|
||||
public boolean mustAck(DiscoveryNode discoveryNode) {
|
||||
return true;
|
||||
}
|
||||
|
||||
Map<String, DocumentMapper> newMappers = new HashMap<>();
|
||||
Map<String, DocumentMapper> existingMappers = new HashMap<>();
|
||||
for (String index : request.indices()) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(index);
|
||||
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
|
||||
DocumentMapper newMapper;
|
||||
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
|
||||
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
|
||||
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
|
||||
} else {
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
|
||||
if (existingMapper != null) {
|
||||
// first, simulate
|
||||
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
|
||||
// if we have conflicts, throw an exception
|
||||
if (mergeResult.hasConflicts()) {
|
||||
throw new MergeMappingException(mergeResult.buildConflicts());
|
||||
}
|
||||
} else {
|
||||
// TODO: can we find a better place for this validation?
|
||||
// The reason this validation is here is that the mapper service doesn't learn about
|
||||
// new types all at once , which can create a false error.
|
||||
|
||||
// For example in MapperService we can't distinguish between a create index api call
|
||||
// and a put mapping api call, so we don't know which type did exist before.
|
||||
// Also the order of the mappings may be backwards.
|
||||
if (newMapper.parentFieldMapper().active()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
|
||||
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
|
||||
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
newMappers.put(index, newMapper);
|
||||
if (existingMapper != null) {
|
||||
existingMappers.put(index, existingMapper);
|
||||
}
|
||||
@Override
|
||||
public void onAllNodesAcked(@Nullable Throwable t) {
|
||||
listener.onResponse(new ClusterStateUpdateResponse(true));
|
||||
}
|
||||
|
||||
String mappingType = request.type();
|
||||
if (mappingType == null) {
|
||||
mappingType = newMappers.values().iterator().next().type();
|
||||
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
|
||||
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
|
||||
}
|
||||
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
|
||||
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
|
||||
@Override
|
||||
public void onAckTimeout() {
|
||||
listener.onResponse(new ClusterStateUpdateResponse(false));
|
||||
}
|
||||
|
||||
final Map<String, MappingMetaData> mappings = new HashMap<>();
|
||||
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
// do the actual merge here on the master, and update the mapping source
|
||||
DocumentMapper newMapper = entry.getValue();
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
CompressedXContent existingSource = null;
|
||||
if (existingMappers.containsKey(entry.getKey())) {
|
||||
existingSource = existingMappers.get(entry.getKey()).mappingSource();
|
||||
}
|
||||
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
|
||||
CompressedXContent updatedSource = mergedMapper.mappingSource();
|
||||
|
||||
if (existingSource != null) {
|
||||
if (existingSource.equals(updatedSource)) {
|
||||
// same source, no changes, ignore it
|
||||
} else {
|
||||
// use the merged mapping source
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public TimeValue ackTimeout() {
|
||||
return request.ackTimeout();
|
||||
}
|
||||
|
||||
if (mappings.isEmpty()) {
|
||||
// no changes, return
|
||||
return currentState;
|
||||
}
|
||||
|
||||
MetaData.Builder builder = MetaData.builder(currentState.metaData());
|
||||
for (String indexName : request.indices()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
|
||||
if (indexMetaData == null) {
|
||||
throw new IndexNotFoundException(indexName);
|
||||
}
|
||||
MappingMetaData mappingMd = mappings.get(indexName);
|
||||
if (mappingMd != null) {
|
||||
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
|
||||
}
|
||||
}
|
||||
|
||||
return ClusterState.builder(currentState).metaData(builder).build();
|
||||
} finally {
|
||||
for (String index : indicesToClose) {
|
||||
indicesService.removeIndex(index, "created for mapping processing");
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
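The MetaDataMappingService changes above replace the hand-rolled refresh/update queue with the batched cluster-state task API (ClusterStateTaskExecutor, ClusterStateTaskConfig, BatchResult). A minimal sketch of how that API is used, taking the signatures from this diff; NoopTask, NoopExecutor and the no-op behaviour are illustrative only, not part of the commit:

// Minimal sketch of the batched task API used by MetaDataMappingService above.
// NoopTask/NoopExecutor are illustrative names; the interfaces and calls mirror the diff.
import java.util.List;
import org.elasticsearch.cluster.*;
import org.elasticsearch.common.Priority;

public class BatchedTaskSketch {

    // A task carries only the data its executor needs; tasks of the same type are batched.
    static class NoopTask {
        final String index;
        NoopTask(String index) { this.index = index; }
    }

    // The executor receives every pending task of this type in one call and returns
    // the resulting cluster state plus a per-task success/failure record.
    static class NoopExecutor implements ClusterStateTaskExecutor<NoopTask> {
        @Override
        public BatchResult<NoopTask> execute(ClusterState currentState, List<NoopTask> tasks) throws Exception {
            // no-op: leave the state untouched and mark every task as successful
            return BatchResult.<NoopTask>builder().successes(tasks).build(currentState);
        }
    }

    static void submit(ClusterService clusterService, String index) {
        clusterService.submitStateUpdateTask("noop [" + index + "]",
                new NoopTask(index),
                ClusterStateTaskConfig.build(Priority.HIGH),
                new NoopExecutor(),
                (source, t) -> { /* onFailure: a real caller would log the throwable */ });
    }
}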
@ -24,11 +24,7 @@ import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
@ -44,13 +40,7 @@ import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.settings.IndexDynamicSettings;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.*;
|
||||
|
||||
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
||||
|
||||
@ -219,7 +209,8 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
||||
}
|
||||
final Settings openSettings = updatedSettingsBuilder.build();
|
||||
|
||||
clusterService.submitStateUpdateTask("update-settings", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("update-settings",
|
||||
new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
@ -334,7 +325,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
||||
public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
|
||||
|
||||
clusterService.submitStateUpdateTask("update-index-compatibility-versions", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("update-index-compatibility-versions", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
|
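A related API change runs through the update-settings hunks above and the reroute hunk below: the task priority is no longer a separate argument to submitStateUpdateTask but is handed to the task's constructor (AckedClusterStateUpdateTask and ClusterStateUpdateTask both gain a Priority parameter). A minimal sketch using only constructors and overrides that appear in this diff; the no-op task body and class name are illustrative:

// Sketch of the constructor-carried priority used by the update-settings and reroute changes.
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.common.Priority;

final class PriorityTaskSketch {
    static void submitNoop(ClusterService clusterService) {
        clusterService.submitStateUpdateTask("noop-example", new ClusterStateUpdateTask(Priority.HIGH) {
            @Override
            public ClusterState execute(ClusterState currentState) {
                return currentState; // no change to the cluster state
            }

            @Override
            public void onFailure(String source, Throwable t) {
                // a real caller would log the failure
            }
        });
    }
}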
@ -147,7 +147,7 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> i
|
||||
return;
|
||||
}
|
||||
logger.trace("rerouting {}", reason);
|
||||
clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", Priority.HIGH, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", new ClusterStateUpdateTask(Priority.HIGH) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
rerouting.set(false);
|
||||
|
@ -314,12 +314,12 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
|
||||
|
||||
public RoutingTableDiff(RoutingTable before, RoutingTable after) {
|
||||
version = after.version;
|
||||
indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting);
|
||||
indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting, DiffableUtils.getStringKeySerializer());
|
||||
}
|
||||
|
||||
public RoutingTableDiff(StreamInput in) throws IOException {
|
||||
version = in.readLong();
|
||||
indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, IndexRoutingTable.PROTO);
|
||||
indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexRoutingTable.PROTO);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -27,7 +27,14 @@ import org.elasticsearch.cluster.health.ClusterStateHealth;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.*;
|
||||
import org.elasticsearch.cluster.routing.AllocationId;
|
||||
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.RoutingNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
|
||||
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
|
||||
@ -39,6 +46,8 @@ import org.elasticsearch.common.settings.Settings;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@ -79,24 +88,83 @@ public class AllocationService extends AbstractComponent {
|
||||
StartedRerouteAllocation allocation = new StartedRerouteAllocation(allocationDeciders, routingNodes, clusterState.nodes(), startedShards, clusterInfoService.getClusterInfo());
|
||||
boolean changed = applyStartedShards(routingNodes, startedShards);
|
||||
if (!changed) {
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable());
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
|
||||
}
|
||||
shardsAllocators.applyStartedShards(allocation);
|
||||
if (withReroute) {
|
||||
reroute(allocation);
|
||||
}
|
||||
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
|
||||
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
|
||||
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
|
||||
String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.metaData(), routingTable),
|
||||
new ClusterStateHealth(clusterState.metaData(), result.routingTable()),
|
||||
"shards started [" + startedShardsAsString + "] ..."
|
||||
);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes) {
|
||||
return buildChangedResult(metaData, routingNodes, new RoutingExplanations());
|
||||
|
||||
}
|
||||
protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes, RoutingExplanations explanations) {
|
||||
final RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build();
|
||||
MetaData newMetaData = updateMetaDataWithRoutingTable(metaData, routingTable);
|
||||
return new RoutingAllocation.Result(true, routingTable.validateRaiseException(newMetaData), newMetaData, explanations);
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the current {@link MetaData} based on the newly created {@link RoutingTable}.
|
||||
*
|
||||
* @param currentMetaData {@link MetaData} object from before the routing table was changed.
|
||||
* @param newRoutingTable new {@link RoutingTable} created by the allocation change
|
||||
* @return adapted {@link MetaData}, potentially the original one if no change was needed.
|
||||
*/
|
||||
static MetaData updateMetaDataWithRoutingTable(MetaData currentMetaData, RoutingTable newRoutingTable) {
|
||||
// make sure index meta data and routing tables are in sync w.r.t active allocation ids
|
||||
MetaData.Builder metaDataBuilder = null;
|
||||
for (IndexRoutingTable indexRoutingTable : newRoutingTable) {
|
||||
final IndexMetaData indexMetaData = currentMetaData.index(indexRoutingTable.getIndex());
|
||||
if (indexMetaData == null) {
|
||||
throw new IllegalStateException("no metadata found for index [" + indexRoutingTable.index() + "]");
|
||||
}
|
||||
IndexMetaData.Builder indexMetaDataBuilder = null;
|
||||
for (IndexShardRoutingTable shardRoutings : indexRoutingTable) {
|
||||
Set<String> activeAllocationIds = shardRoutings.activeShards().stream()
|
||||
.map(ShardRouting::allocationId)
|
||||
.filter(Objects::nonNull)
|
||||
.map(AllocationId::getId)
|
||||
.collect(Collectors.toSet());
|
||||
// only update active allocation ids if there is an active shard
|
||||
if (activeAllocationIds.isEmpty() == false) {
|
||||
// get currently stored allocation ids
|
||||
Set<String> storedAllocationIds = indexMetaData.activeAllocationIds(shardRoutings.shardId().id());
|
||||
if (activeAllocationIds.equals(storedAllocationIds) == false) {
|
||||
if (indexMetaDataBuilder == null) {
|
||||
indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
|
||||
}
|
||||
|
||||
indexMetaDataBuilder.putActiveAllocationIds(shardRoutings.shardId().id(), activeAllocationIds);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (indexMetaDataBuilder != null) {
|
||||
if (metaDataBuilder == null) {
|
||||
metaDataBuilder = MetaData.builder(currentMetaData);
|
||||
}
|
||||
metaDataBuilder.put(indexMetaDataBuilder);
|
||||
}
|
||||
}
|
||||
if (metaDataBuilder != null) {
|
||||
return metaDataBuilder.build();
|
||||
} else {
|
||||
return currentMetaData;
|
||||
}
|
||||
}
|
||||
|
||||
public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) {
|
||||
return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null)));
|
||||
}
|
||||
@ -117,16 +185,15 @@ public class AllocationService extends AbstractComponent {
|
||||
System.nanoTime(), System.currentTimeMillis()));
|
||||
}
|
||||
if (!changed) {
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable());
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
|
||||
}
|
||||
shardsAllocators.applyFailedShards(allocation);
|
||||
reroute(allocation);
|
||||
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
|
||||
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
|
||||
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), routingTable),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
"shards failed [" + failedShardsAsString + "] ..."
|
||||
);
|
||||
return result;
|
||||
@ -169,11 +236,10 @@ public class AllocationService extends AbstractComponent {
|
||||
// the assumption is that commands will move / act on shards (or fail through exceptions)
|
||||
// so, there will always be shard "movements", so no need to check on reroute
|
||||
reroute(allocation);
|
||||
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
|
||||
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable, explanations);
|
||||
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes, explanations);
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), routingTable),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
"reroute commands"
|
||||
);
|
||||
return result;
|
||||
@ -200,13 +266,12 @@ public class AllocationService extends AbstractComponent {
|
||||
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState.nodes(), clusterInfoService.getClusterInfo(), currentNanoTime());
|
||||
allocation.debugDecision(debug);
|
||||
if (!reroute(allocation)) {
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable());
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
|
||||
}
|
||||
RoutingTable routingTable = new RoutingTable.Builder().updateNodes(routingNodes).build().validateRaiseException(clusterState.metaData());
|
||||
RoutingAllocation.Result result = new RoutingAllocation.Result(true, routingTable);
|
||||
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), routingTable),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
reason
|
||||
);
|
||||
return result;
|
||||
|
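The AllocationService changes above route every changed outcome through buildChangedResult, so a RoutingAllocation.Result now carries the (possibly updated) MetaData next to the RoutingTable; the next hunk adds the matching constructor and metaData() accessor. A caller that folds such a result back into a cluster state therefore has to copy both pieces. A minimal caller-side sketch, assuming the usual ClusterState.Builder setters; the helper name is illustrative:

// Illustrative helper: applying an allocation result now copies the result's
// metadata as well as its routing table back into the cluster state.
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

final class ApplyAllocationResultSketch {
    static ClusterState apply(ClusterState current, RoutingAllocation.Result result) {
        return ClusterState.builder(current)
                .routingTable(result.routingTable())
                .metaData(result.metaData())
                .build();
    }
}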
@ -52,29 +52,33 @@ public class RoutingAllocation {
|
||||
|
||||
private final RoutingTable routingTable;
|
||||
|
||||
private final MetaData metaData;
|
||||
|
||||
private RoutingExplanations explanations = new RoutingExplanations();
|
||||
|
||||
/**
|
||||
* Creates a new {@link RoutingAllocation.Result}
|
||||
*
|
||||
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
|
||||
* @param routingTable the {@link RoutingTable} this Result references
|
||||
* @param metaData the {@link MetaData} this Result references
|
||||
*/
|
||||
public Result(boolean changed, RoutingTable routingTable) {
|
||||
public Result(boolean changed, RoutingTable routingTable, MetaData metaData) {
|
||||
this.changed = changed;
|
||||
this.routingTable = routingTable;
|
||||
this.metaData = metaData;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new {@link RoutingAllocation.Result}
|
||||
*
|
||||
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
|
||||
* @param routingTable the {@link RoutingTable} this Result references
|
||||
* @param metaData the {@link MetaData} this Result references
|
||||
* @param explanations Explanation for the reroute actions
|
||||
*/
|
||||
public Result(boolean changed, RoutingTable routingTable, RoutingExplanations explanations) {
|
||||
public Result(boolean changed, RoutingTable routingTable, MetaData metaData, RoutingExplanations explanations) {
|
||||
this.changed = changed;
|
||||
this.routingTable = routingTable;
|
||||
this.metaData = metaData;
|
||||
this.explanations = explanations;
|
||||
}
|
||||
|
||||
@ -85,6 +89,14 @@ public class RoutingAllocation {
|
||||
return this.changed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the {@link MetaData} referenced by this result
|
||||
* @return referenced {@link MetaData}
|
||||
*/
|
||||
public MetaData metaData() {
|
||||
return metaData;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the {@link RoutingTable} referenced by this result
|
||||
* @return referenced {@link RoutingTable}
|
||||
|
@ -20,16 +20,8 @@
|
||||
package org.elasticsearch.cluster.service;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.ClusterState.Builder;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.LocalNodeMasterListener;
|
||||
import org.elasticsearch.cluster.TimeoutClusterStateListener;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
@ -41,6 +33,7 @@ import org.elasticsearch.cluster.routing.OperationRouting;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
@ -49,13 +42,7 @@ import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.text.StringText;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.common.util.concurrent.FutureUtils;
|
||||
import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
|
||||
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.*;
|
||||
import org.elasticsearch.common.util.iterable.Iterables;
|
||||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.discovery.DiscoveryService;
|
||||
@ -63,18 +50,10 @@ import org.elasticsearch.node.settings.NodeSettingsService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Queue;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.*;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
|
||||
|
||||
@ -111,6 +90,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
||||
private final Collection<ClusterStateListener> priorityClusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateListener> lastClusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Map<ClusterStateTaskExecutor, List<UpdateTask>> updateTasksPerExecutor = new HashMap<>();
|
||||
// TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API
|
||||
private final Collection<ClusterStateListener> postAppliedListeners = new CopyOnWriteArrayList<>();
|
||||
private final Iterable<ClusterStateListener> preAppliedListeners = Iterables.concat(priorityClusterStateListeners, clusterStateListeners, lastClusterStateListeners);
|
||||
@ -289,30 +269,33 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
||||
|
||||
@Override
|
||||
public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) {
|
||||
submitStateUpdateTask(source, Priority.NORMAL, updateTask);
|
||||
submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask) {
|
||||
public <T> void submitStateUpdateTask(final String source, final T task,
|
||||
final ClusterStateTaskConfig config,
|
||||
final ClusterStateTaskExecutor<T> executor,
|
||||
final ClusterStateTaskListener listener
|
||||
) {
|
||||
if (!lifecycle.started()) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
final UpdateTask task = new UpdateTask(source, priority, updateTask);
|
||||
if (updateTask.timeout() != null) {
|
||||
updateTasksExecutor.execute(task, threadPool.scheduler(), updateTask.timeout(), new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
threadPool.generic().execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
updateTask.onFailure(task.source(), new ProcessClusterEventTimeoutException(updateTask.timeout(), task.source()));
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
final UpdateTask<T> updateTask = new UpdateTask<>(source, task, config, executor, listener);
|
||||
|
||||
synchronized (updateTasksPerExecutor) {
|
||||
updateTasksPerExecutor.computeIfAbsent(executor, k -> new ArrayList<>()).add(updateTask);
|
||||
}
|
||||
|
||||
if (config.timeout() != null) {
|
||||
updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> {
|
||||
if (updateTask.processed.getAndSet(true) == false) {
|
||||
listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source));
|
||||
}}));
|
||||
} else {
|
||||
updateTasksExecutor.execute(task);
|
||||
updateTasksExecutor.execute(updateTask);
|
||||
}
|
||||
} catch (EsRejectedExecutionException e) {
|
||||
// ignore cases where we are shutting down..., there is really nothing interesting
|
||||
@ -379,188 +362,238 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
||||
}
|
||||
}
|
||||
|
||||
class UpdateTask extends SourcePrioritizedRunnable {
|
||||
|
||||
public final ClusterStateUpdateTask updateTask;
|
||||
|
||||
UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
|
||||
super(priority, source);
|
||||
this.updateTask = updateTask;
|
||||
<T> void runTasksForExecutor(ClusterStateTaskExecutor<T> executor) {
|
||||
final ArrayList<UpdateTask<T>> toExecute = new ArrayList<>();
|
||||
final ArrayList<String> sources = new ArrayList<>();
|
||||
synchronized (updateTasksPerExecutor) {
|
||||
List<UpdateTask> pending = updateTasksPerExecutor.remove(executor);
|
||||
if (pending != null) {
|
||||
for (UpdateTask<T> task : pending) {
|
||||
if (task.processed.getAndSet(true) == false) {
|
||||
logger.trace("will process [{}]", task.source);
|
||||
toExecute.add(task);
|
||||
sources.add(task.source);
|
||||
} else {
|
||||
logger.trace("skipping [{}], already processed", task.source);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (toExecute.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
final String source = Strings.collectionToCommaDelimitedString(sources);
|
||||
if (!lifecycle.started()) {
|
||||
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
|
||||
return;
|
||||
}
|
||||
logger.debug("processing [{}]: execute", source);
|
||||
ClusterState previousClusterState = clusterState;
|
||||
if (!previousClusterState.nodes().localNodeMaster() && executor.runOnlyOnMaster()) {
|
||||
logger.debug("failing [{}]: local node is no longer master", source);
|
||||
toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source));
|
||||
return;
|
||||
}
|
||||
ClusterStateTaskExecutor.BatchResult<T> batchResult;
|
||||
long startTimeNS = System.nanoTime();
|
||||
try {
|
||||
List<T> inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList());
|
||||
batchResult = executor.execute(previousClusterState, inputs);
|
||||
} catch (Throwable e) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
|
||||
sb.append(previousClusterState.nodes().prettyPrint());
|
||||
sb.append(previousClusterState.routingTable().prettyPrint());
|
||||
sb.append(previousClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.trace(sb.toString(), e);
|
||||
}
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
if (!lifecycle.started()) {
|
||||
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
|
||||
return;
|
||||
}
|
||||
logger.debug("processing [{}]: execute", source);
|
||||
ClusterState previousClusterState = clusterState;
|
||||
if (!previousClusterState.nodes().localNodeMaster() && updateTask.runOnlyOnMaster()) {
|
||||
logger.debug("failing [{}]: local node is no longer master", source);
|
||||
updateTask.onNoLongerMaster(source);
|
||||
return;
|
||||
}
|
||||
ClusterState newClusterState;
|
||||
long startTimeNS = System.nanoTime();
|
||||
try {
|
||||
newClusterState = updateTask.execute(previousClusterState);
|
||||
} catch (Throwable e) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
|
||||
sb.append(previousClusterState.nodes().prettyPrint());
|
||||
sb.append(previousClusterState.routingTable().prettyPrint());
|
||||
sb.append(previousClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.trace(sb.toString(), e);
|
||||
}
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
updateTask.onFailure(source, e);
|
||||
return;
|
||||
}
|
||||
assert batchResult.executionResults != null;
|
||||
|
||||
if (previousClusterState == newClusterState) {
|
||||
if (updateTask instanceof AckedClusterStateUpdateTask) {
|
||||
ClusterState newClusterState = batchResult.resultingState;
|
||||
final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>();
|
||||
// fail all tasks that have failed and extract those that are waiting for results
|
||||
for (UpdateTask<T> updateTask : toExecute) {
|
||||
assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString();
|
||||
final ClusterStateTaskExecutor.TaskResult executionResult =
|
||||
batchResult.executionResults.get(updateTask.task);
|
||||
executionResult.handle(() -> proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex));
|
||||
}
|
||||
|
||||
if (previousClusterState == newClusterState) {
|
||||
for (UpdateTask<T> task : proccessedListeners) {
|
||||
if (task.listener instanceof AckedClusterStateTaskListener) {
|
||||
//no need to wait for ack if nothing changed, the update can be counted as acknowledged
|
||||
((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null);
|
||||
((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null);
|
||||
}
|
||||
updateTask.clusterStateProcessed(source, previousClusterState, newClusterState);
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
return;
|
||||
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
|
||||
}
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
Discovery.AckListener ackListener = new NoOpAckListener();
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
// only the master controls the version numbers
|
||||
Builder builder = ClusterState.builder(newClusterState).incrementVersion();
|
||||
if (previousClusterState.routingTable() != newClusterState.routingTable()) {
|
||||
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build());
|
||||
}
|
||||
if (previousClusterState.metaData() != newClusterState.metaData()) {
|
||||
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
|
||||
}
|
||||
newClusterState = builder.build();
|
||||
|
||||
if (updateTask instanceof AckedClusterStateUpdateTask) {
|
||||
final AckedClusterStateUpdateTask ackedUpdateTask = (AckedClusterStateUpdateTask) updateTask;
|
||||
if (ackedUpdateTask.ackTimeout() == null || ackedUpdateTask.ackTimeout().millis() == 0) {
|
||||
ackedUpdateTask.onAckTimeout();
|
||||
try {
|
||||
ArrayList<Discovery.AckListener> ackListeners = new ArrayList<>();
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
// only the master controls the version numbers
|
||||
Builder builder = ClusterState.builder(newClusterState).incrementVersion();
|
||||
if (previousClusterState.routingTable() != newClusterState.routingTable()) {
|
||||
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build());
|
||||
}
|
||||
if (previousClusterState.metaData() != newClusterState.metaData()) {
|
||||
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
|
||||
}
|
||||
newClusterState = builder.build();
|
||||
for (UpdateTask<T> task : proccessedListeners) {
|
||||
if (task.listener instanceof AckedClusterStateTaskListener) {
|
||||
final AckedClusterStateTaskListener ackedListener = (AckedClusterStateTaskListener) task.listener;
|
||||
if (ackedListener.ackTimeout() == null || ackedListener.ackTimeout().millis() == 0) {
|
||||
ackedListener.onAckTimeout();
|
||||
} else {
|
||||
try {
|
||||
ackListener = new AckCountDownListener(ackedUpdateTask, newClusterState.version(), newClusterState.nodes(), threadPool);
|
||||
ackListeners.add(new AckCountDownListener(ackedListener, newClusterState.version(), newClusterState.nodes(), threadPool));
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex);
|
||||
}
|
||||
//timeout straightaway, otherwise we could wait forever as the timeout thread has not started
|
||||
ackedUpdateTask.onAckTimeout();
|
||||
ackedListener.onAckTimeout();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.prettyPrint());
|
||||
logger.trace(sb.toString());
|
||||
} else if (logger.isDebugEnabled()) {
|
||||
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
|
||||
}
|
||||
|
||||
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
|
||||
// new cluster state, notify all listeners
|
||||
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
|
||||
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
|
||||
String summary = nodesDelta.shortSummary();
|
||||
if (summary.length() > 0) {
|
||||
logger.info("{}, reason: {}", summary, source);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO, do this in parallel (and wait)
|
||||
for (DiscoveryNode node : nodesDelta.addedNodes()) {
|
||||
if (!nodeRequiresConnection(node)) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
transportService.connectToNode(node);
|
||||
} catch (Throwable e) {
|
||||
// the fault detection will detect it as failed as well
|
||||
logger.warn("failed to connect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
// if we are the master, publish the new state to all nodes
|
||||
// we publish here before we send a notification to all the listeners, since if it fails
|
||||
// we don't want to notify
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
logger.debug("publishing cluster state version [{}]", newClusterState.version());
|
||||
try {
|
||||
discoveryService.publish(clusterChangedEvent, ackListener);
|
||||
} catch (Discovery.FailedToCommitClusterStateException t) {
|
||||
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version());
|
||||
updateTask.onFailure(source, t);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// update the current cluster state
|
||||
clusterState = newClusterState;
|
||||
logger.debug("set local cluster state to version {}", newClusterState.version());
|
||||
for (ClusterStateListener listener : preAppliedListeners) {
|
||||
try {
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
for (DiscoveryNode node : nodesDelta.removedNodes()) {
|
||||
try {
|
||||
transportService.disconnectFromNode(node);
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to disconnect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);
|
||||
|
||||
for (ClusterStateListener listener : postAppliedListeners) {
|
||||
try {
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
//manual ack only from the master at the end of the publish
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
try {
|
||||
ackListener.onNodeAck(newClusterState.nodes().localNode(), null);
|
||||
} catch (Throwable t) {
|
||||
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
|
||||
}
|
||||
}
|
||||
|
||||
updateTask.clusterStateProcessed(source, previousClusterState, newClusterState);
|
||||
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
} catch (Throwable t) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.nodes().prettyPrint());
|
||||
sb.append(newClusterState.routingTable().prettyPrint());
|
||||
sb.append(newClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.warn(sb.toString(), t);
|
||||
// TODO: do we want to call updateTask.onFailure here?
|
||||
}
|
||||
final Discovery.AckListener ackListener = new DelegetingAckListener(ackListeners);
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.prettyPrint());
|
||||
logger.trace(sb.toString());
|
||||
} else if (logger.isDebugEnabled()) {
|
||||
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
|
||||
}
|
||||
|
||||
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
|
||||
// new cluster state, notify all listeners
|
||||
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
|
||||
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
|
||||
String summary = nodesDelta.shortSummary();
|
||||
if (summary.length() > 0) {
|
||||
logger.info("{}, reason: {}", summary, source);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO, do this in parallel (and wait)
|
||||
for (DiscoveryNode node : nodesDelta.addedNodes()) {
|
||||
if (!nodeRequiresConnection(node)) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
transportService.connectToNode(node);
|
||||
} catch (Throwable e) {
|
||||
// the fault detection will detect it as failed as well
|
||||
logger.warn("failed to connect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
// if we are the master, publish the new state to all nodes
|
||||
// we publish here before we send a notification to all the listeners, since if it fails
|
||||
// we don't want to notify
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
logger.debug("publishing cluster state version [{}]", newClusterState.version());
|
||||
try {
|
||||
discoveryService.publish(clusterChangedEvent, ackListener);
|
||||
} catch (Discovery.FailedToCommitClusterStateException t) {
|
||||
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version());
|
||||
proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// update the current cluster state
|
||||
clusterState = newClusterState;
|
||||
logger.debug("set local cluster state to version {}", newClusterState.version());
|
||||
for (ClusterStateListener listener : preAppliedListeners) {
|
||||
try {
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
for (DiscoveryNode node : nodesDelta.removedNodes()) {
|
||||
try {
|
||||
transportService.disconnectFromNode(node);
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to disconnect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);
|
||||
|
||||
for (ClusterStateListener listener : postAppliedListeners) {
|
||||
try {
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
//manual ack only from the master at the end of the publish
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
try {
|
||||
ackListener.onNodeAck(newClusterState.nodes().localNode(), null);
|
||||
} catch (Throwable t) {
|
||||
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
|
||||
}
|
||||
}
|
||||
|
||||
for (UpdateTask<T> task : proccessedListeners) {
|
||||
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
|
||||
}
|
||||
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
} catch (Throwable t) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.nodes().prettyPrint());
|
||||
sb.append(newClusterState.routingTable().prettyPrint());
|
||||
sb.append(newClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.warn(sb.toString(), t);
|
||||
// TODO: do we want to call updateTask.onFailure here?
|
||||
}
|
||||
|
||||
}
|

class UpdateTask<T> extends SourcePrioritizedRunnable {

public final T task;
public final ClusterStateTaskConfig config;
public final ClusterStateTaskExecutor<T> executor;
public final ClusterStateTaskListener listener;
public final AtomicBoolean processed = new AtomicBoolean();

UpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {
super(config.priority(), source);
this.task = task;
this.config = config;
this.executor = executor;
this.listener = listener;
}

@Override
public void run() {
runTasksForExecutor(executor);
}
}
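
The UpdateTask/runTasksForExecutor pair above batches every pending task that was submitted against the same ClusterStateTaskExecutor and hands the whole group to one executor.execute(previousState, batch) call. A minimal sketch of that grouping, using hypothetical stand-in types (PendingTask, BatchDrainSketch) rather than the real cluster-service internals:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical, simplified stand-ins for the real cluster-service types.
final class PendingTask<T> {
    final T task;
    final Object executor;                        // the shared executor instance acts as the batch key
    final AtomicBoolean processed = new AtomicBoolean();
    PendingTask(T task, Object executor) { this.task = task; this.executor = executor; }
}

final class BatchDrainSketch {
    /** Collect every not-yet-processed task that shares the given executor, claiming each one. */
    static <T> List<T> drainBatchFor(List<PendingTask<T>> pending, Object executor) {
        List<T> batch = new ArrayList<>();
        for (PendingTask<T> candidate : pending) {
            // identity comparison: only tasks submitted against the same executor instance batch together
            if (candidate.executor == executor && candidate.processed.compareAndSet(false, true)) {
                batch.add(candidate.task);
            }
        }
        return batch;  // the real code then runs executor.execute(previousClusterState, batch) once
    }
}
---------------------------------------------------------------------------

The identity check on the executor instance is what keeps unrelated update tasks in separate batches while still collapsing many submissions of the same kind into a single cluster state computation.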

@@ -729,13 +762,24 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
}
}

private static class NoOpAckListener implements Discovery.AckListener {
private static class DelegetingAckListener implements Discovery.AckListener {

final private List<Discovery.AckListener> listeners;

private DelegetingAckListener(List<Discovery.AckListener> listeners) {
this.listeners = listeners;
}

@Override
public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
for (Discovery.AckListener listener : listeners) {
listener.onNodeAck(node, t);
}
}

@Override
public void onTimeout() {
throw new UnsupportedOperationException("no timeout delegation");
}
}

@@ -743,20 +787,20 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class);

private final AckedClusterStateUpdateTask ackedUpdateTask;
private final AckedClusterStateTaskListener ackedTaskListener;
private final CountDown countDown;
private final DiscoveryNodes nodes;
private final long clusterStateVersion;
private final Future<?> ackTimeoutCallback;
private Throwable lastFailure;

AckCountDownListener(AckedClusterStateUpdateTask ackedUpdateTask, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
this.ackedUpdateTask = ackedUpdateTask;
AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
this.ackedTaskListener = ackedTaskListener;
this.clusterStateVersion = clusterStateVersion;
this.nodes = nodes;
int countDown = 0;
for (DiscoveryNode node : nodes) {
if (ackedUpdateTask.mustAck(node)) {
if (ackedTaskListener.mustAck(node)) {
countDown++;
}
}
@@ -764,7 +808,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
countDown = Math.max(1, countDown);
logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
this.countDown = new CountDown(countDown);
this.ackTimeoutCallback = threadPool.schedule(ackedUpdateTask.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
@Override
public void run() {
onTimeout();
@@ -774,7 +818,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

@Override
public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
if (!ackedUpdateTask.mustAck(node)) {
if (!ackedTaskListener.mustAck(node)) {
//we always wait for the master ack anyway
if (!node.equals(nodes.masterNode())) {
return;
@@ -790,7 +834,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
if (countDown.countDown()) {
logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion);
FutureUtils.cancel(ackTimeoutCallback);
ackedUpdateTask.onAllNodesAcked(lastFailure);
ackedTaskListener.onAllNodesAcked(lastFailure);
}
}

@@ -798,7 +842,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
public void onTimeout() {
if (countDown.fastForward()) {
logger.trace("timeout waiting for acknowledgement for cluster_state update (version: {})", clusterStateVersion);
ackedUpdateTask.onAckTimeout();
ackedTaskListener.onAckTimeout();
}
}
}
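
AckCountDownListener counts one expected acknowledgement per node for which mustAck(node) returns true, with a floor of one so the master's own ack is always awaited, and completes either when the count reaches zero or when the ack timeout fires. A rough sketch of just the counting part, using plain JDK types and a hypothetical AckCountSketch name instead of the CountDown helper:

---------------------------------------------------------------------------
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;

final class AckCountSketch {
    private final AtomicInteger remaining;

    AckCountSketch(List<String> nodeIds, Predicate<String> mustAck) {
        int expected = 0;
        for (String node : nodeIds) {
            if (mustAck.test(node)) {
                expected++;
            }
        }
        // never wait on zero: the master's own ack still has to arrive before completion
        this.remaining = new AtomicInteger(Math.max(1, expected));
    }

    /** Returns true exactly once, when the last expected ack arrives. */
    boolean onAck() {
        return remaining.decrementAndGet() == 0;
    }
}
---------------------------------------------------------------------------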
|
||||
@ -810,5 +854,4 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
||||
InternalClusterService.this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
|
||||
}
|
||||
}
|
||||
|
||||
}

@@ -65,8 +65,8 @@ public class MacAddressProvider {
byte[] address = null;
try {
address = getMacAddress();
} catch( SocketException se ) {
logger.warn("Unable to get mac address, will use a dummy address", se);
} catch (Throwable t) {
logger.warn("Unable to get mac address, will use a dummy address", t);
// address will be set below
}


@@ -116,7 +116,7 @@ public abstract class Terminal {
}

public void printError(Throwable t) {
printError("%s", t.getMessage());
printError("%s", t.toString());
if (isDebugEnabled) {
printStackTrace(t);
}
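
The Terminal change prints t.toString() instead of t.getMessage(): getMessage() may be null when an exception carries no detail message, whereas toString() always includes the exception class name. A quick illustration:

---------------------------------------------------------------------------
public final class ThrowableMessageSketch {
    public static void main(String[] args) {
        Throwable t = new NullPointerException();            // no detail message supplied
        System.out.println(String.valueOf(t.getMessage()));  // prints "null"
        System.out.println(t.toString());                    // prints "java.lang.NullPointerException"
    }
}
---------------------------------------------------------------------------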

@@ -23,7 +23,6 @@ import org.apache.lucene.store.IndexInput;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.deflate.DeflateCompressor;
import org.elasticsearch.common.compress.lzf.LZFCompressor;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -42,7 +41,6 @@ public class CompressorFactory {

static {
compressors = new Compressor[] {
new LZFCompressor(),
new DeflateCompressor()
};
defaultCompressor = new DeflateCompressor();
@@ -82,12 +80,23 @@ public class CompressorFactory {

XContentType contentType = XContentFactory.xContentType(bytes);
if (contentType == null) {
if (isAncient(bytes)) {
throw new IllegalStateException("unsupported compression: index was created before v2.0.0.beta1 and wasn't upgraded?");
}
throw new NotXContentException("Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes");
}

return null;
}

/** true if the bytes were compressed with LZF: only used before elasticsearch 2.0 */
private static boolean isAncient(BytesReference bytes) {
return bytes.length() >= 3 &&
bytes.get(0) == 'Z' &&
bytes.get(1) == 'V' &&
(bytes.get(2) == 0 || bytes.get(2) == 1);
}
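
isAncient recognizes the legacy LZF framing purely by its first three bytes: ASCII 'Z', 'V' and a block-type byte of 0 or 1. A standalone version of the same check over a plain byte array, assuming only that prefix (hypothetical helper name):

---------------------------------------------------------------------------
final class LzfHeaderSketch {
    /** True if the buffer starts with the legacy LZF chunk header "ZV" plus block type 0 or 1. */
    static boolean looksLikeLzf(byte[] bytes) {
        return bytes != null
                && bytes.length >= 3
                && bytes[0] == 'Z'
                && bytes[1] == 'V'
                && (bytes[2] == 0 || bytes[2] == 1);
    }

    public static void main(String[] args) {
        // "ZV" followed by block type 1 (a compressed chunk) is detected
        System.out.println(looksLikeLzf(new byte[]{'Z', 'V', 1, 42, 42}));  // true
        System.out.println(looksLikeLzf(new byte[]{'{', '"', 'a'}));        // false: JSON, not LZF
    }
}
---------------------------------------------------------------------------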

public static Compressor compressor(ChannelBuffer buffer) {
for (Compressor compressor : compressors) {
if (compressor.isCompressed(buffer)) {

@ -1,80 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress.lzf;
|
||||
|
||||
import com.ning.compress.lzf.ChunkDecoder;
|
||||
import com.ning.compress.lzf.LZFChunk;
|
||||
import org.apache.lucene.store.BufferedIndexInput;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.elasticsearch.common.compress.CompressedIndexInput;
|
||||
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
|
||||
*/
|
||||
@Deprecated
|
||||
public class LZFCompressedIndexInput extends CompressedIndexInput {
|
||||
|
||||
private final ChunkDecoder decoder;
|
||||
// scratch area buffer
|
||||
private byte[] inputBuffer;
|
||||
|
||||
public LZFCompressedIndexInput(IndexInput in, ChunkDecoder decoder) throws IOException {
|
||||
super(in);
|
||||
|
||||
this.decoder = decoder;
|
||||
this.uncompressed = new byte[LZFChunk.MAX_CHUNK_LEN];
|
||||
this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
|
||||
this.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void readHeader(IndexInput in) throws IOException {
|
||||
byte[] header = new byte[LZFCompressor.LUCENE_HEADER.length];
|
||||
in.readBytes(header, 0, header.length, false);
|
||||
if (!Arrays.equals(header, LZFCompressor.LUCENE_HEADER)) {
|
||||
throw new IOException("wrong lzf compressed header [" + Arrays.toString(header) + "]");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int uncompress(IndexInput in, byte[] out) throws IOException {
|
||||
return decoder.decodeChunk(new InputStreamIndexInput(in, Long.MAX_VALUE), inputBuffer, out);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() throws IOException {
|
||||
// nothing to do here...
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexInput clone() {
|
||||
LZFCompressedIndexInput cloned = (LZFCompressedIndexInput) super.clone();
|
||||
cloned.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
|
||||
return cloned;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexInput slice(String description, long offset, long length) throws IOException {
|
||||
return BufferedIndexInput.wrap(description, this, offset, length);
|
||||
}
|
||||
}
|
@ -1,73 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress.lzf;
|
||||
|
||||
import com.ning.compress.BufferRecycler;
|
||||
import com.ning.compress.lzf.ChunkDecoder;
|
||||
import com.ning.compress.lzf.LZFChunk;
|
||||
import org.elasticsearch.common.compress.CompressedStreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class LZFCompressedStreamInput extends CompressedStreamInput {
|
||||
|
||||
private final BufferRecycler recycler;
|
||||
|
||||
private final ChunkDecoder decoder;
|
||||
|
||||
// scratch area buffer
|
||||
private byte[] inputBuffer;
|
||||
|
||||
public LZFCompressedStreamInput(StreamInput in, ChunkDecoder decoder) throws IOException {
|
||||
super(in);
|
||||
this.recycler = BufferRecycler.instance();
|
||||
this.decoder = decoder;
|
||||
|
||||
this.uncompressed = recycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
|
||||
this.inputBuffer = recycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readHeader(StreamInput in) throws IOException {
|
||||
// nothing to do here, each chunk has a header
|
||||
}
|
||||
|
||||
@Override
|
||||
public int uncompress(StreamInput in, byte[] out) throws IOException {
|
||||
return decoder.decodeChunk(in, inputBuffer, out);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() throws IOException {
|
||||
byte[] buf = inputBuffer;
|
||||
if (buf != null) {
|
||||
inputBuffer = null;
|
||||
recycler.releaseInputBuffer(buf);
|
||||
}
|
||||
buf = uncompressed;
|
||||
if (buf != null) {
|
||||
uncompressed = null;
|
||||
recycler.releaseDecodeBuffer(uncompressed);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,100 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress.lzf;
|
||||
|
||||
import com.ning.compress.lzf.ChunkDecoder;
|
||||
import com.ning.compress.lzf.LZFChunk;
|
||||
import com.ning.compress.lzf.util.ChunkDecoderFactory;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.compress.CompressedIndexInput;
|
||||
import org.elasticsearch.common.compress.Compressor;
|
||||
import org.elasticsearch.common.compress.deflate.DeflateCompressor;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.jboss.netty.buffer.ChannelBuffer;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* @deprecated Use {@link DeflateCompressor} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public class LZFCompressor implements Compressor {
|
||||
|
||||
static final byte[] LUCENE_HEADER = {'L', 'Z', 'F', 0};
|
||||
|
||||
private ChunkDecoder decoder;
|
||||
|
||||
public LZFCompressor() {
|
||||
this.decoder = ChunkDecoderFactory.safeInstance();
|
||||
Loggers.getLogger(LZFCompressor.class).debug("using decoder[{}] ", this.decoder.getClass().getSimpleName());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCompressed(BytesReference bytes) {
|
||||
return bytes.length() >= 3 &&
|
||||
bytes.get(0) == LZFChunk.BYTE_Z &&
|
||||
bytes.get(1) == LZFChunk.BYTE_V &&
|
||||
(bytes.get(2) == LZFChunk.BLOCK_TYPE_COMPRESSED || bytes.get(2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCompressed(ChannelBuffer buffer) {
|
||||
int offset = buffer.readerIndex();
|
||||
return buffer.readableBytes() >= 3 &&
|
||||
buffer.getByte(offset) == LZFChunk.BYTE_Z &&
|
||||
buffer.getByte(offset + 1) == LZFChunk.BYTE_V &&
|
||||
(buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_COMPRESSED || buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isCompressed(IndexInput in) throws IOException {
|
||||
long currentPointer = in.getFilePointer();
|
||||
// since we have some metdata before the first compressed header, we check on our specific header
|
||||
if (in.length() - currentPointer < (LUCENE_HEADER.length)) {
|
||||
return false;
|
||||
}
|
||||
for (int i = 0; i < LUCENE_HEADER.length; i++) {
|
||||
if (in.readByte() != LUCENE_HEADER[i]) {
|
||||
in.seek(currentPointer);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
in.seek(currentPointer);
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public StreamInput streamInput(StreamInput in) throws IOException {
|
||||
return new LZFCompressedStreamInput(in, decoder);
|
||||
}
|
||||
|
||||
@Override
|
||||
public StreamOutput streamOutput(StreamOutput out) throws IOException {
|
||||
throw new UnsupportedOperationException("LZF is only here for back compat, no write support");
|
||||
}
|
||||
|
||||
@Override
|
||||
public CompressedIndexInput indexInput(IndexInput in) throws IOException {
|
||||
return new LZFCompressedIndexInput(in, decoder);
|
||||
}
|
||||
}
|
@ -34,8 +34,6 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> {
|
||||
|
||||
public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
|
||||
|
||||
protected boolean translated = false;
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
|
@ -89,6 +89,4 @@ public class MultiPolygonBuilder extends ShapeBuilder {
|
||||
return new XShapeCollection<>(shapes, SPATIAL_CONTEXT);
|
||||
//note: ShapeCollection is probably faster than a Multi* geom.
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
@ -38,6 +38,7 @@ import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
/**
|
||||
* The {@link PolygonBuilder} implements the groundwork to create polygons. This contains
|
||||
@ -141,9 +142,10 @@ public class PolygonBuilder extends ShapeBuilder {
|
||||
|
||||
Edge[] edges = new Edge[numEdges];
|
||||
Edge[] holeComponents = new Edge[holes.size()];
|
||||
int offset = createEdges(0, orientation, shell, null, edges, 0);
|
||||
final AtomicBoolean translated = new AtomicBoolean(false);
|
||||
int offset = createEdges(0, orientation, shell, null, edges, 0, translated);
|
||||
for (int i = 0; i < holes.size(); i++) {
|
||||
int length = createEdges(i+1, orientation, shell, this.holes.get(i), edges, offset);
|
||||
int length = createEdges(i+1, orientation, shell, this.holes.get(i), edges, offset, translated);
|
||||
holeComponents[i] = edges[offset];
|
||||
offset += length;
|
||||
}
|
||||
@ -508,14 +510,157 @@ public class PolygonBuilder extends ShapeBuilder {
|
||||
}
|
||||
|
||||
private static int createEdges(int component, Orientation orientation, LineStringBuilder shell,
|
||||
LineStringBuilder hole,
|
||||
Edge[] edges, int offset) {
|
||||
LineStringBuilder hole, Edge[] edges, int offset, final AtomicBoolean translated) {
|
||||
// inner rings (holes) have an opposite direction than the outer rings
|
||||
// XOR will invert the orientation for outer ring cases (Truth Table:, T/T = F, T/F = T, F/T = T, F/F = F)
|
||||
boolean direction = (component == 0 ^ orientation == Orientation.RIGHT);
|
||||
// set the points array accordingly (shell or hole)
|
||||
Coordinate[] points = (hole != null) ? hole.coordinates(false) : shell.coordinates(false);
|
||||
Edge.ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1);
|
||||
ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1, translated);
|
||||
return points.length-1;
|
||||
}

/**
* Create a connected list of a list of coordinates
*
* @param points
* array of points
* @param offset
* index of the first point
* @param length
* number of points
* @return Array of edges
*/
private static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
Coordinate[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) {
// calculate the direction of the points:
// find the point at the top of the set and check its
// neighbors' orientation. So direction is equivalent
// to clockwise/counterclockwise
final int top = top(points, offset, length);
final int prev = (offset + ((top + length - 1) % length));
final int next = (offset + ((top + 1) % length));
boolean orientation = points[offset + prev].x > points[offset + next].x;

// OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
// since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
// thus if orientation is computed as cw, the logic will translate points across dateline
// and convert to a right handed system

// compute the bounding box and calculate range
double[] range = range(points, offset, length);
final double rng = range[1] - range[0];
// translate the points if the following is true
// 1. shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
// (translation would result in a collapsed poly)
// 2. the shell of the candidate hole has been translated (to preserve the coordinate system)
boolean incorrectOrientation = component == 0 && handedness != orientation;
if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (translated.get() && component != 0)) {
translate(points);
// flip the translation bit if the shell is being translated
if (component == 0) {
translated.set(true);
}
// correct the orientation post translation (ccw for shell, cw for holes)
if (component == 0 || (component != 0 && handedness == orientation)) {
orientation = !orientation;
}
}
return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
}
||||
|
||||
private static final int top(Coordinate[] points, int offset, int length) {
|
||||
int top = 0; // we start at 1 here since top points to 0
|
||||
for (int i = 1; i < length; i++) {
|
||||
if (points[offset + i].y < points[offset + top].y) {
|
||||
top = i;
|
||||
} else if (points[offset + i].y == points[offset + top].y) {
|
||||
if (points[offset + i].x < points[offset + top].x) {
|
||||
top = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
return top;
|
||||
}
|
||||
|
||||
private static final double[] range(Coordinate[] points, int offset, int length) {
|
||||
double minX = points[0].x;
|
||||
double maxX = points[0].x;
|
||||
double minY = points[0].y;
|
||||
double maxY = points[0].y;
|
||||
// compute the bounding coordinates (@todo: cleanup brute force)
|
||||
for (int i = 1; i < length; ++i) {
|
||||
if (points[offset + i].x < minX) {
|
||||
minX = points[offset + i].x;
|
||||
}
|
||||
if (points[offset + i].x > maxX) {
|
||||
maxX = points[offset + i].x;
|
||||
}
|
||||
if (points[offset + i].y < minY) {
|
||||
minY = points[offset + i].y;
|
||||
}
|
||||
if (points[offset + i].y > maxY) {
|
||||
maxY = points[offset + i].y;
|
||||
}
|
||||
}
|
||||
return new double[] {minX, maxX, minY, maxY};
|
||||
}
|
||||
|
||||
/**
|
||||
* Concatenate a set of points to a polygon
|
||||
*
|
||||
* @param component
|
||||
* component id of the polygon
|
||||
* @param direction
|
||||
* direction of the ring
|
||||
* @param points
|
||||
* list of points to concatenate
|
||||
* @param pointOffset
|
||||
* index of the first point
|
||||
* @param edges
|
||||
* Array of edges to write the result to
|
||||
* @param edgeOffset
|
||||
* index of the first edge in the result
|
||||
* @param length
|
||||
* number of points to use
|
||||
* @return the edges creates
|
||||
*/
|
||||
private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
|
||||
int length) {
|
||||
assert edges.length >= length+edgeOffset;
|
||||
assert points.length >= length+pointOffset;
|
||||
edges[edgeOffset] = new Edge(points[pointOffset], null);
|
||||
for (int i = 1; i < length; i++) {
|
||||
if (direction) {
|
||||
edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
|
||||
edges[edgeOffset + i].component = component;
|
||||
} else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) {
|
||||
edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
|
||||
edges[edgeOffset + i - 1].component = component;
|
||||
} else {
|
||||
throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]);
|
||||
}
|
||||
}
|
||||
|
||||
if (direction) {
|
||||
edges[edgeOffset].setNext(edges[edgeOffset + length - 1]);
|
||||
edges[edgeOffset].component = component;
|
||||
} else {
|
||||
edges[edgeOffset + length - 1].setNext(edges[edgeOffset]);
|
||||
edges[edgeOffset + length - 1].component = component;
|
||||
}
|
||||
|
||||
return edges;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range
|
||||
*/
|
||||
private static void translate(Coordinate[] points) {
|
||||
for (Coordinate c : points) {
|
||||
if (c.x < 0) {
|
||||
c.x += 2*DATELINE;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -362,150 +362,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
|
||||
}
|
||||
}
|
||||
|
||||
private static final int top(Coordinate[] points, int offset, int length) {
|
||||
int top = 0; // we start at 1 here since top points to 0
|
||||
for (int i = 1; i < length; i++) {
|
||||
if (points[offset + i].y < points[offset + top].y) {
|
||||
top = i;
|
||||
} else if (points[offset + i].y == points[offset + top].y) {
|
||||
if (points[offset + i].x < points[offset + top].x) {
|
||||
top = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
return top;
|
||||
}
|
||||
|
||||
private static final double[] range(Coordinate[] points, int offset, int length) {
|
||||
double minX = points[0].x;
|
||||
double maxX = points[0].x;
|
||||
double minY = points[0].y;
|
||||
double maxY = points[0].y;
|
||||
// compute the bounding coordinates (@todo: cleanup brute force)
|
||||
for (int i = 1; i < length; ++i) {
|
||||
if (points[offset + i].x < minX) {
|
||||
minX = points[offset + i].x;
|
||||
}
|
||||
if (points[offset + i].x > maxX) {
|
||||
maxX = points[offset + i].x;
|
||||
}
|
||||
if (points[offset + i].y < minY) {
|
||||
minY = points[offset + i].y;
|
||||
}
|
||||
if (points[offset + i].y > maxY) {
|
||||
maxY = points[offset + i].y;
|
||||
}
|
||||
}
|
||||
return new double[] {minX, maxX, minY, maxY};
|
||||
}
|
||||
|
||||
/**
|
||||
* Concatenate a set of points to a polygon
|
||||
*
|
||||
* @param component
|
||||
* component id of the polygon
|
||||
* @param direction
|
||||
* direction of the ring
|
||||
* @param points
|
||||
* list of points to concatenate
|
||||
* @param pointOffset
|
||||
* index of the first point
|
||||
* @param edges
|
||||
* Array of edges to write the result to
|
||||
* @param edgeOffset
|
||||
* index of the first edge in the result
|
||||
* @param length
|
||||
* number of points to use
|
||||
* @return the edges creates
|
||||
*/
|
||||
private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
|
||||
int length) {
|
||||
assert edges.length >= length+edgeOffset;
|
||||
assert points.length >= length+pointOffset;
|
||||
edges[edgeOffset] = new Edge(points[pointOffset], null);
|
||||
for (int i = 1; i < length; i++) {
|
||||
if (direction) {
|
||||
edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
|
||||
edges[edgeOffset + i].component = component;
|
||||
} else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) {
|
||||
edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
|
||||
edges[edgeOffset + i - 1].component = component;
|
||||
} else {
|
||||
throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]);
|
||||
}
|
||||
}
|
||||
|
||||
if (direction) {
|
||||
edges[edgeOffset].setNext(edges[edgeOffset + length - 1]);
|
||||
edges[edgeOffset].component = component;
|
||||
} else {
|
||||
edges[edgeOffset + length - 1].setNext(edges[edgeOffset]);
|
||||
edges[edgeOffset + length - 1].component = component;
|
||||
}
|
||||
|
||||
return edges;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a connected list of a list of coordinates
|
||||
*
|
||||
* @param points
|
||||
* array of point
|
||||
* @param offset
|
||||
* index of the first point
|
||||
* @param length
|
||||
* number of points
|
||||
* @return Array of edges
|
||||
*/
|
||||
protected static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
|
||||
Coordinate[] points, int offset, Edge[] edges, int toffset, int length) {
|
||||
// calculate the direction of the points:
|
||||
// find the point a the top of the set and check its
|
||||
// neighbors orientation. So direction is equivalent
|
||||
// to clockwise/counterclockwise
|
||||
final int top = top(points, offset, length);
|
||||
final int prev = (offset + ((top + length - 1) % length));
|
||||
final int next = (offset + ((top + 1) % length));
|
||||
boolean orientation = points[offset + prev].x > points[offset + next].x;
|
||||
|
||||
// OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
|
||||
// since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
|
||||
// thus if orientation is computed as cw, the logic will translate points across dateline
|
||||
// and convert to a right handed system
|
||||
|
||||
// compute the bounding box and calculate range
|
||||
double[] range = range(points, offset, length);
|
||||
final double rng = range[1] - range[0];
|
||||
// translate the points if the following is true
|
||||
// 1. shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
|
||||
// (translation would result in a collapsed poly)
|
||||
// 2. the shell of the candidate hole has been translated (to preserve the coordinate system)
|
||||
boolean incorrectOrientation = component == 0 && handedness != orientation;
|
||||
if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (shell.translated && component != 0)) {
|
||||
translate(points);
|
||||
// flip the translation bit if the shell is being translated
|
||||
if (component == 0) {
|
||||
shell.translated = true;
|
||||
}
|
||||
// correct the orientation post translation (ccw for shell, cw for holes)
|
||||
if (component == 0 || (component != 0 && handedness == orientation)) {
|
||||
orientation = !orientation;
|
||||
}
|
||||
}
|
||||
return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
|
||||
}
|
||||
|
||||
/**
|
||||
* Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range
|
||||
*/
|
||||
protected static void translate(Coordinate[] points) {
|
||||
for (Coordinate c : points) {
|
||||
if (c.x < 0) {
|
||||
c.x += 2*DATELINE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the intersection of this line segment to the given position
|
||||
*
|
||||
@ -517,7 +373,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
|
||||
return intersect = position(coordinate, next.coordinate, position);
|
||||
}
|
||||
|
||||
public static Coordinate position(Coordinate p1, Coordinate p2, double position) {
|
||||
protected static Coordinate position(Coordinate p1, Coordinate p2, double position) {
|
||||
if (position == 0) {
|
||||
return p1;
|
||||
} else if (position == 1) {
|
||||
@ -542,7 +398,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
|
||||
public int compare(Edge o1, Edge o2) {
|
||||
return Double.compare(o1.intersect.y, o2.intersect.y);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public static enum Orientation {
|
||||
|
@ -19,8 +19,6 @@
|
||||
|
||||
package org.elasticsearch.common.lease;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
/** Utility methods to work with {@link Releasable}s. */
|
||||
|
@ -123,7 +123,7 @@ public abstract class ExtensionPoint {
|
||||
public static final class SelectedType<T> extends ClassMap<T> {
|
||||
|
||||
public SelectedType(String name, Class<T> extensionClass) {
|
||||
super(name, extensionClass, Collections.EMPTY_SET);
|
||||
super(name, extensionClass, Collections.emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -133,7 +133,7 @@ public class NodeJoinController extends AbstractComponent {
|
||||
|
||||
/** utility method to fail the given election context under the cluster state thread */
|
||||
private void failContext(final ElectionContext context, final String reason, final Throwable throwable) {
|
||||
clusterService.submitStateUpdateTask("zen-disco-join(failure [" + reason + "])", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-join(failure [" + reason + "])", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
@ -231,7 +231,7 @@ public class NodeJoinController extends AbstractComponent {
|
||||
}
|
||||
|
||||
final String source = "zen-disco-join(elected_as_master, [" + pendingMasterJoins + "] joins received)";
|
||||
clusterService.submitStateUpdateTask(source, Priority.IMMEDIATE, new ProcessJoinsTask() {
|
||||
clusterService.submitStateUpdateTask(source, new ProcessJoinsTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
// Take into account the previous known nodes, if they happen not to be available
|
||||
@ -280,7 +280,7 @@ public class NodeJoinController extends AbstractComponent {
|
||||
|
||||
/** process all pending joins */
|
||||
private void processJoins(String reason) {
|
||||
clusterService.submitStateUpdateTask("zen-disco-join(" + reason + ")", Priority.URGENT, new ProcessJoinsTask());
|
||||
clusterService.submitStateUpdateTask("zen-disco-join(" + reason + ")", new ProcessJoinsTask(Priority.URGENT));
|
||||
}
|
||||
|
||||
|
||||
@ -356,6 +356,10 @@ public class NodeJoinController extends AbstractComponent {
|
||||
private final List<MembershipAction.JoinCallback> joinCallbacksToRespondTo = new ArrayList<>();
|
||||
private boolean nodeAdded = false;
|
||||
|
||||
public ProcessJoinsTask(Priority priority) {
|
||||
super(priority);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
DiscoveryNodes.Builder nodesBuilder;
|
||||
|
@ -320,7 +320,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
} catch (FailedToCommitClusterStateException t) {
|
||||
// cluster service logs a WARN message
|
||||
logger.debug("failed to publish cluster state version [{}] (not enough nodes acknowledged, min master nodes [{}])", clusterChangedEvent.state().version(), electMaster.minimumMasterNodes());
|
||||
clusterService.submitStateUpdateTask("zen-disco-failed-to-publish", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-failed-to-publish", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
return rejoin(currentState, "failed to publish to min_master_nodes");
|
||||
@ -498,7 +498,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
return;
|
||||
}
|
||||
if (localNodeMaster()) {
|
||||
clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.id());
|
||||
@ -538,7 +538,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
// nothing to do here...
|
||||
return;
|
||||
}
|
||||
clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
if (currentState.nodes().get(node.id()) == null) {
|
||||
@ -587,7 +587,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
// We only set the new value. If the master doesn't see enough nodes it will revoke it's mastership.
|
||||
return;
|
||||
}
|
||||
clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
// check if we have enough master nodes, if not, we need to move into joining the cluster again
|
||||
@ -627,7 +627,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
|
||||
logger.info("master_left [{}], reason [{}]", cause, masterNode, reason);
|
||||
|
||||
clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
@ -694,7 +694,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
}
|
||||
|
||||
void processNextPendingClusterState(String reason) {
|
||||
clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
return false;
|
||||
@ -1059,7 +1059,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
return;
|
||||
}
|
||||
logger.debug("got a ping from another master {}. resolving who should rejoin. current ping count: [{}]", pingRequest.masterNode(), pingsWhileMaster.get());
|
||||
clusterService.submitStateUpdateTask("ping from another master", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("ping from another master", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
@ -1114,7 +1114,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
||||
class RejoinClusterRequestHandler implements TransportRequestHandler<RejoinClusterRequest> {
|
||||
@Override
|
||||
public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception {
|
||||
clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
|
@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.*;
|
||||
|
||||
@ -41,7 +42,7 @@ import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.new
|
||||
public class NodesFaultDetection extends FaultDetection {
|
||||
|
||||
public static final String PING_ACTION_NAME = "internal:discovery/zen/fd/ping";
|
||||
|
||||
|
||||
public abstract static class Listener {
|
||||
|
||||
public void onNodeFailure(DiscoveryNode node, String reason) {}
|
||||
@@ -145,14 +146,18 @@ public class NodesFaultDetection {
}

private void notifyNodeFailure(final DiscoveryNode node, final String reason) {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
for (Listener listener : listeners) {
listener.onNodeFailure(node, reason);
try {
threadPool.generic().execute(new Runnable() {
@Override
public void run() {
for (Listener listener : listeners) {
listener.onNodeFailure(node, reason);
}
}
}
});
});
} catch (EsRejectedExecutionException ex) {
logger.trace("[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down", ex, node, reason);
}
}
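
The change wraps the generic thread-pool submission in a try/catch so that a node which is shutting down, and therefore rejecting new work, just logs and drops the notification instead of propagating the rejection. Roughly the same defensive pattern with plain JDK executors and a hypothetical listener type:

---------------------------------------------------------------------------
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;

final class SafeNotifySketch {
    interface FailureListener {
        void onNodeFailure(String nodeId, String reason);
    }

    static void notifyNodeFailure(ExecutorService executor, List<FailureListener> listeners,
                                  String nodeId, String reason) {
        try {
            executor.execute(() -> {
                for (FailureListener listener : listeners) {
                    listener.onNodeFailure(nodeId, reason);
                }
            });
        } catch (RejectedExecutionException ex) {
            // the pool is shutting down; dropping the notification is acceptable here
            System.err.println("ignoring node failure for " + nodeId + ": executor is shut down");
        }
    }
}
---------------------------------------------------------------------------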
|
||||
|
||||
private void notifyPingReceived(final PingRequest pingRequest) {
|
||||
|
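The `notifyNodeFailure` hunk above wraps the dispatch to the generic thread pool in a try/catch so that a rejection during local node shutdown is only traced, not rethrown. A small self-contained sketch of the same defensive pattern, using the JDK executor API and its `RejectedExecutionException` as a stand-in for `EsRejectedExecutionException`:

---------------------------------------------------------------------------
// Illustrative only: the same defensive pattern as the notifyNodeFailure
// change above, against a plain JDK executor.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public class RejectOnShutdownSketch {
    public static void main(String[] args) {
        ExecutorService generic = Executors.newSingleThreadExecutor();
        generic.shutdown(); // simulate a node that is already shutting down

        try {
            generic.execute(() -> System.out.println("notifying listeners"));
        } catch (RejectedExecutionException ex) {
            // Swallow and report at a low level: the failure notification is
            // no longer meaningful once the local node is going away.
            System.out.println("ignoring node failure, local node is shutting down: " + ex);
        }
    }
}
---------------------------------------------------------------------------
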
@ -58,6 +58,8 @@ public class Environment {

    private final Path pluginsFile;

    private final Path modulesFile;

    private final Path sharedDataFile;

    /** location of bin/, used by plugin manager */

@ -157,6 +159,7 @@ public class Environment {

        binFile = homeFile.resolve("bin");
        libFile = homeFile.resolve("lib");
        modulesFile = homeFile.resolve("modules");
    }

    /**

@ -275,6 +278,10 @@ public class Environment {
        return libFile;
    }

    public Path modulesFile() {
        return modulesFile;
    }

    public Path logsFile() {
        return logsFile;
    }

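The `Environment` hunks above add a `modulesFile` path alongside `binFile` and `libFile`, resolved from the home directory and exposed through a `modulesFile()` accessor. A tiny sketch of that resolution with `java.nio.file`; the home directory value here is a made-up example:

---------------------------------------------------------------------------
// Illustrative only: resolving a modules/ directory under a home path,
// mirroring the new modulesFile = homeFile.resolve("modules") line above.
import java.nio.file.Path;
import java.nio.file.Paths;

public class ModulesPathSketch {
    public static void main(String[] args) {
        Path home = Paths.get("/opt/elasticsearch");   // hypothetical home directory
        Path bin = home.resolve("bin");
        Path lib = home.resolve("lib");
        Path modules = home.resolve("modules");        // new in the hunk above
        System.out.println(bin + " " + lib + " " + modules);
    }
}
---------------------------------------------------------------------------
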
@ -131,7 +131,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
        if (metaData != null) {
            ShardPath shardPath = null;
            try {
                IndexSettings indexSettings = new IndexSettings(metaData, settings, Collections.EMPTY_LIST);
                IndexSettings indexSettings = new IndexSettings(metaData, settings, Collections.emptyList());
                shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
                if (shardPath == null) {
                    throw new IllegalStateException(shardId + " no shard path found");

@ -55,7 +55,7 @@ public final class AnalysisRegistry implements Closeable {
    private final Environment environemnt;

    public AnalysisRegistry(HunspellService hunspellService, Environment environment) {
        this(hunspellService, environment, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
        this(hunspellService, environment, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
    }

    public AnalysisRegistry(HunspellService hunspellService, Environment environment,

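Both of the preceding hunks replace the raw `Collections.EMPTY_LIST`/`EMPTY_MAP` constants with the typed `Collections.emptyList()`/`emptyMap()` factories. A short sketch of why the factories are preferable: the raw constants are untyped and force an unchecked conversion, while the factories infer the element types:

---------------------------------------------------------------------------
// Illustrative only: the difference behind the EMPTY_LIST -> emptyList()
// swaps in the hunks above.
import java.util.Collections;
import java.util.List;
import java.util.Map;

public class EmptyCollectionsSketch {
    @SuppressWarnings("unchecked")
    static List<String> rawEmpty() {
        return Collections.EMPTY_LIST;   // raw List: unchecked conversion to List<String>
    }

    static List<String> typedEmpty() {
        return Collections.emptyList();  // element type inferred, no warning
    }

    public static void main(String[] args) {
        Map<String, Integer> empty = Collections.emptyMap();
        System.out.println(rawEmpty().size() + " " + typedEmpty().size() + " " + empty.size());
    }
}
---------------------------------------------------------------------------
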
@ -19,22 +19,32 @@
package org.elasticsearch.index.fieldvisitor;

import org.apache.lucene.index.FieldInfo;
import org.elasticsearch.common.regex.Regex;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Set;

/**
 * A field visitor that allows to load a selection of the stored fields.
 * A field visitor that allows to load a selection of the stored fields by exact name or by pattern.
 * Supported pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy".
 * The Uid field is always loaded.
 * The class is optimized for source loading as it is a common use case.
 */
public class CustomFieldsVisitor extends FieldsVisitor {

    private final Set<String> fields;
    private final List<String> patterns;

    public CustomFieldsVisitor(Set<String> fields, boolean loadSource) {
    public CustomFieldsVisitor(Set<String> fields, List<String> patterns, boolean loadSource) {
        super(loadSource);
        this.fields = fields;
        this.patterns = patterns;
    }

    public CustomFieldsVisitor(Set<String> fields, boolean loadSource) {
        this(fields, Collections.emptyList(), loadSource);
    }

    @Override

@ -42,7 +52,14 @@ public class CustomFieldsVisitor extends FieldsVisitor {
        if (super.needsField(fieldInfo) == Status.YES) {
            return Status.YES;
        }

        return fields.contains(fieldInfo.name) ? Status.YES : Status.NO;
        if (fields.contains(fieldInfo.name)) {
            return Status.YES;
        }
        for (String pattern : patterns) {
            if (Regex.simpleMatch(pattern, fieldInfo.name)) {
                return Status.YES;
            }
        }
        return Status.NO;
    }
}

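The updated Javadoc and `needsField` above add pattern-based selection on top of exact field names, delegating the wildcard test to `Regex.simpleMatch`. A standalone sketch of the same check with a hand-rolled single-`*` matcher (an approximation of the listed pattern styles, not the Elasticsearch implementation):

---------------------------------------------------------------------------
// Illustrative only: a tiny stand-in for the pattern check added to
// needsField above, without depending on the Elasticsearch classes.
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class FieldPatternSketch {
    // Single-'*' wildcard match, enough for "xxx*", "*xxx", "*xxx*" and "xxx*yyy".
    static boolean simpleMatch(String pattern, String value) {
        if (pattern.length() >= 2 && pattern.startsWith("*") && pattern.endsWith("*")) {
            return value.contains(pattern.substring(1, pattern.length() - 1));    // "*xxx*"
        }
        if (pattern.startsWith("*")) {
            return value.endsWith(pattern.substring(1));                          // "*xxx"
        }
        if (pattern.endsWith("*")) {
            return value.startsWith(pattern.substring(0, pattern.length() - 1));  // "xxx*"
        }
        int star = pattern.indexOf('*');
        if (star >= 0) {                                                          // "xxx*yyy"
            String prefix = pattern.substring(0, star);
            String suffix = pattern.substring(star + 1);
            return value.length() >= prefix.length() + suffix.length()
                    && value.startsWith(prefix) && value.endsWith(suffix);
        }
        return pattern.equals(value);                                             // no wildcard
    }

    static boolean needsField(Set<String> fields, List<String> patterns, String name) {
        if (fields.contains(name)) {
            return true;
        }
        for (String pattern : patterns) {
            if (simpleMatch(pattern, name)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Set<String> fields = new HashSet<>(Arrays.asList("title"));
        List<String> patterns = Arrays.asList("user.*", "*_id");
        System.out.println(needsField(fields, patterns, "title"));      // true: exact name
        System.out.println(needsField(fields, patterns, "user.name"));  // true: "user.*"
        System.out.println(needsField(fields, patterns, "parent_id"));  // true: "*_id"
        System.out.println(needsField(fields, patterns, "body"));       // false
    }
}
---------------------------------------------------------------------------
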
@ -336,8 +336,6 @@ public class DocumentMapper implements ToXContent {

    private void addMappers(Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
        assert mappingLock.isWriteLockedByCurrentThread();
        // first ensure we don't have any incompatible new fields
        mapperService.checkNewMappersCompatibility(objectMappers, fieldMappers, updateAllTypes);

        // update mappers for this document type
        Map<String, ObjectMapper> builder = new HashMap<>(this.objectMappers);

@ -351,11 +349,12 @@ public class DocumentMapper implements ToXContent {
        this.fieldMappers = this.fieldMappers.copyAndAllAll(fieldMappers);

        // finally update for the entire index
        mapperService.addMappers(objectMappers, fieldMappers);
        mapperService.addMappers(type, objectMappers, fieldMappers);
    }

    public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
        try (ReleasableLock lock = mappingWriteLock.acquire()) {
            mapperService.checkMappersCompatibility(type, mapping, updateAllTypes);
            final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes);
            this.mapping.merge(mapping, mergeResult);
            if (simulate == false) {

@ -28,8 +28,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.core.DateFieldMapper.DateFieldType;

@ -47,7 +45,6 @@ import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/** A parser for documents, given mappings from a DocumentMapper */

@ -79,6 +76,10 @@ class DocumentParser implements Closeable {
    }

    private ParsedDocument innerParseDocument(SourceToParse source) throws MapperParsingException {
        if (docMapper.type().equals(MapperService.DEFAULT_MAPPING)) {
            throw new IllegalArgumentException("It is forbidden to index into the default mapping [" + MapperService.DEFAULT_MAPPING + "]");
        }

        ParseContext.InternalParseContext context = cache.get();

        final Mapping mapping = docMapper.mapping();

@ -712,37 +713,64 @@ class DocumentParser implements Closeable {
        // The path of the dest field might be completely different from the current one so we need to reset it
        context = context.overridePath(new ContentPath(0));

        String[] paths = Strings.splitStringToArray(field, '.');
        String fieldName = paths[paths.length-1];
        ObjectMapper mapper = context.root();
        String objectPath = "";
        String fieldPath = field;
        int posDot = field.lastIndexOf('.');
        if (posDot > 0) {
            objectPath = field.substring(0, posDot);
            context.path().add(objectPath);
            mapper = context.docMapper().objectMappers().get(objectPath);
            fieldPath = field.substring(posDot + 1);
        ObjectMapper[] mappers = new ObjectMapper[paths.length-1];
        if (paths.length > 1) {
            ObjectMapper parent = context.root();
            for (int i = 0; i < paths.length-1; i++) {
                mapper = context.docMapper().objectMappers().get(context.path().fullPathAsText(paths[i]));
                if (mapper == null) {
                    // One mapping is missing, check if we are allowed to create a dynamic one.
                    ObjectMapper.Dynamic dynamic = parent.dynamic();
                    if (dynamic == null) {
                        dynamic = dynamicOrDefault(context.root().dynamic());
                    }

                    switch (dynamic) {
                        case STRICT:
                            throw new StrictDynamicMappingException(parent.fullPath(), paths[i]);
                        case TRUE:
                            Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], "object");
                            if (builder == null) {
                                // if this is a non root object, then explicitly set the dynamic behavior if set
                                if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
                                    ((ObjectMapper.Builder) builder).dynamic(parent.dynamic());
                                }
                                builder = MapperBuilders.object(paths[i]).enabled(true).pathType(parent.pathType());
                            }
                            Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
                            mapper = (ObjectMapper) builder.build(builderContext);
                            if (mapper.nested() != ObjectMapper.Nested.NO) {
                                throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().fullPathAsText(paths[i]) + "]) through `copy_to`");
                            }
                            break;
                        case FALSE:
                            // Maybe we should log something to tell the user that the copy_to is ignored in this case.
                            break;
                        default:
                            throw new AssertionError("Unexpected dynamic type " + dynamic);

                    }
                }
                context.path().add(paths[i]);
                mappers[i] = mapper;
                parent = mapper;
            }
        }
        if (mapper == null) {
            //TODO: Create an object dynamically?
            throw new MapperParsingException("attempt to copy value to non-existing object [" + field + "]");
        }
        ObjectMapper update = parseDynamicValue(context, mapper, fieldPath, context.parser().currentToken());
        ObjectMapper update = parseDynamicValue(context, mapper, fieldName, context.parser().currentToken());
        assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping

        // propagate the update to the root
        while (objectPath.length() > 0) {
            String parentPath = "";
            ObjectMapper parent = context.root();
            posDot = objectPath.lastIndexOf('.');
            if (posDot > 0) {
                parentPath = objectPath.substring(0, posDot);
                parent = context.docMapper().objectMappers().get(parentPath);
        if (paths.length > 1) {
            for (int i = paths.length - 2; i >= 0; i--) {
                ObjectMapper parent = context.root();
                if (i > 0) {
                    parent = mappers[i-1];
                }
                assert parent != null;
                update = parent.mappingUpdate(update);
            }
            if (parent == null) {
                throw new IllegalStateException("[" + objectPath + "] has no parent for path [" + parentPath + "]");
            }
            update = parent.mappingUpdate(update);
            objectPath = parentPath;
        }
        context.addDynamicMappingsUpdate(update);
    }

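The rewritten `copy_to` handling above splits the destination field into dot-separated segments, walks them one by one, and dynamically creates any missing intermediate object mapper instead of failing outright. A reduced sketch of that shape using plain maps in place of `ObjectMapper`s:

---------------------------------------------------------------------------
// Illustrative only: the shape of the new copy_to handling above, reduced to
// plain maps. The path "a.b.c" is split into segments; missing intermediate
// objects are created on the way down (the real code builds dynamic
// ObjectMappers instead).
import java.util.HashMap;
import java.util.Map;

public class CopyToPathSketch {
    @SuppressWarnings("unchecked")
    static void copyTo(Map<String, Object> root, String field, Object value) {
        String[] paths = field.split("\\.");
        Map<String, Object> parent = root;
        for (int i = 0; i < paths.length - 1; i++) {
            Object child = parent.get(paths[i]);
            if (child == null) {
                // Missing intermediate object: create it dynamically, much like
                // the TRUE branch of the switch above does with an object builder.
                child = new HashMap<String, Object>();
                parent.put(paths[i], child);
            }
            parent = (Map<String, Object>) child;
        }
        parent.put(paths[paths.length - 1], value);   // fieldName = last segment
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>();
        copyTo(doc, "user.address.city", "Amsterdam");
        System.out.println(doc);   // {user={address={city=Amsterdam}}}
    }
}
---------------------------------------------------------------------------
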
@ -307,7 +307,6 @@ public abstract class FieldMapper extends Mapper {
        if (ref.get().equals(fieldType()) == false) {
            throw new IllegalStateException("Cannot overwrite field type reference to unequal reference");
        }
        ref.incrementAssociatedMappers();
        this.fieldTypeRef = ref;
    }

@ -360,7 +359,7 @@ public abstract class FieldMapper extends Mapper {
    }

    @Override
    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
    public void merge(Mapper mergeWith, MergeResult mergeResult) {
        if (!this.getClass().equals(mergeWith.getClass())) {
            String mergedType = mergeWith.getClass().getSimpleName();
            if (mergeWith instanceof FieldMapper) {

@ -371,20 +370,6 @@ public abstract class FieldMapper extends Mapper {
            return;
        }
        FieldMapper fieldMergeWith = (FieldMapper) mergeWith;
        List<String> subConflicts = new ArrayList<>(); // TODO: just expose list from MergeResult?
        fieldType().checkTypeName(fieldMergeWith.fieldType(), subConflicts);
        if (subConflicts.isEmpty() == false) {
            // return early if field types don't match
            assert subConflicts.size() == 1;
            mergeResult.addConflict(subConflicts.get(0));
            return;
        }

        boolean strict = this.fieldTypeRef.getNumAssociatedMappers() > 1 && mergeResult.updateAllTypes() == false;
        fieldType().checkCompatibility(fieldMergeWith.fieldType(), subConflicts, strict);
        for (String conflict : subConflicts) {
            mergeResult.addConflict(conflict);
        }
        multiFields.merge(mergeWith, mergeResult);

        if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {

@ -614,7 +599,7 @@ public abstract class FieldMapper extends Mapper {
        }

        // No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge
        public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
        public void merge(Mapper mergeWith, MergeResult mergeResult) {
            FieldMapper mergeWithMultiField = (FieldMapper) mergeWith;

            List<FieldMapper> newFieldMappers = null;

@ -24,9 +24,11 @@ import org.elasticsearch.common.regex.Regex;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.Set;

/**

@ -37,18 +39,49 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
    /** Full field name to field type */
    private final CopyOnWriteHashMap<String, MappedFieldTypeReference> fullNameToFieldType;

    /** Full field name to types containing a mapping for this full name. */
    private final CopyOnWriteHashMap<String, Set<String>> fullNameToTypes;

    /** Index field name to field type */
    private final CopyOnWriteHashMap<String, MappedFieldTypeReference> indexNameToFieldType;

    /** Index field name to types containing a mapping for this index name. */
    private final CopyOnWriteHashMap<String, Set<String>> indexNameToTypes;

    /** Create a new empty instance. */
    public FieldTypeLookup() {
        fullNameToFieldType = new CopyOnWriteHashMap<>();
        fullNameToTypes = new CopyOnWriteHashMap<>();
        indexNameToFieldType = new CopyOnWriteHashMap<>();
        indexNameToTypes = new CopyOnWriteHashMap<>();
    }

    private FieldTypeLookup(CopyOnWriteHashMap<String, MappedFieldTypeReference> fullName, CopyOnWriteHashMap<String, MappedFieldTypeReference> indexName) {
        fullNameToFieldType = fullName;
        indexNameToFieldType = indexName;
    private FieldTypeLookup(
            CopyOnWriteHashMap<String, MappedFieldTypeReference> fullName,
            CopyOnWriteHashMap<String, Set<String>> fullNameToTypes,
            CopyOnWriteHashMap<String, MappedFieldTypeReference> indexName,
            CopyOnWriteHashMap<String, Set<String>> indexNameToTypes) {
        this.fullNameToFieldType = fullName;
        this.fullNameToTypes = fullNameToTypes;
        this.indexNameToFieldType = indexName;
        this.indexNameToTypes = indexNameToTypes;
    }

    private static CopyOnWriteHashMap<String, Set<String>> addType(CopyOnWriteHashMap<String, Set<String>> map, String key, String type) {
        Set<String> types = map.get(key);
        if (types == null) {
            return map.copyAndPut(key, Collections.singleton(type));
        } else if (types.contains(type)) {
            // noting to do
            return map;
        } else {
            Set<String> newTypes = new HashSet<>(types.size() + 1);
            newTypes.addAll(types);
            newTypes.add(type);
            assert newTypes.size() == types.size() + 1;
            newTypes = Collections.unmodifiableSet(newTypes);
            return map.copyAndPut(key, newTypes);
        }
    }

    /**

@ -56,9 +89,15 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
     * from the provided fields. If a field already exists, the field type will be updated
     * to use the new mappers field type.
     */
    public FieldTypeLookup copyAndAddAll(Collection<FieldMapper> newFieldMappers) {
    public FieldTypeLookup copyAndAddAll(String type, Collection<FieldMapper> newFieldMappers) {
        Objects.requireNonNull(type, "type must not be null");
        if (MapperService.DEFAULT_MAPPING.equals(type)) {
            throw new IllegalArgumentException("Default mappings should not be added to the lookup");
        }
        CopyOnWriteHashMap<String, MappedFieldTypeReference> fullName = this.fullNameToFieldType;
        CopyOnWriteHashMap<String, Set<String>> fullNameToTypes = this.fullNameToTypes;
        CopyOnWriteHashMap<String, MappedFieldTypeReference> indexName = this.indexNameToFieldType;
        CopyOnWriteHashMap<String, Set<String>> indexNameToTypes = this.indexNameToTypes;

        for (FieldMapper fieldMapper : newFieldMappers) {
            MappedFieldType fieldType = fieldMapper.fieldType();

@ -86,8 +125,23 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
                // this new field bridges between two existing field names (a full and index name), which we cannot support
                throw new IllegalStateException("insane mappings found. field " + fieldType.names().fullName() + " maps across types to field " + fieldType.names().indexName());
            }

            fullNameToTypes = addType(fullNameToTypes, fieldType.names().fullName(), type);
            indexNameToTypes = addType(indexNameToTypes, fieldType.names().indexName(), type);
        }
        return new FieldTypeLookup(fullName, fullNameToTypes, indexName, indexNameToTypes);
    }

    private static boolean beStrict(String type, Set<String> types, boolean updateAllTypes) {
        assert types.size() >= 1;
        if (updateAllTypes) {
            return false;
        } else if (types.size() == 1 && types.contains(type)) {
            // we are implicitly updating all types
            return false;
        } else {
            return true;
        }
        return new FieldTypeLookup(fullName, indexName);
    }

    /**

@ -95,16 +149,14 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
     * If any are not compatible, an IllegalArgumentException is thrown.
     * If updateAllTypes is true, only basic compatibility is checked.
     */
    public void checkCompatibility(Collection<FieldMapper> newFieldMappers, boolean updateAllTypes) {
        for (FieldMapper fieldMapper : newFieldMappers) {
    public void checkCompatibility(String type, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
        for (FieldMapper fieldMapper : fieldMappers) {
            MappedFieldTypeReference ref = fullNameToFieldType.get(fieldMapper.fieldType().names().fullName());
            if (ref != null) {
                List<String> conflicts = new ArrayList<>();
                ref.get().checkTypeName(fieldMapper.fieldType(), conflicts);
                if (conflicts.isEmpty()) { // only check compat if they are the same type
                    boolean strict = updateAllTypes == false;
                    ref.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
                }
                final Set<String> types = fullNameToTypes.get(fieldMapper.fieldType().names().fullName());
                boolean strict = beStrict(type, types, updateAllTypes);
                ref.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
                if (conflicts.isEmpty() == false) {
                    throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().names().fullName() + "] conflicts with existing mapping in other types:\n" + conflicts.toString());
                }

@ -114,11 +166,9 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
            MappedFieldTypeReference indexNameRef = indexNameToFieldType.get(fieldMapper.fieldType().names().indexName());
            if (indexNameRef != null) {
                List<String> conflicts = new ArrayList<>();
                indexNameRef.get().checkTypeName(fieldMapper.fieldType(), conflicts);
                if (conflicts.isEmpty()) { // only check compat if they are the same type
                    boolean strict = updateAllTypes == false;
                    indexNameRef.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
                }
                final Set<String> types = indexNameToTypes.get(fieldMapper.fieldType().names().indexName());
                boolean strict = beStrict(type, types, updateAllTypes);
                indexNameRef.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
                if (conflicts.isEmpty() == false) {
                    throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().names().fullName() + "] conflicts with mapping with the same index name in other types" + conflicts.toString());
                }

@ -133,6 +183,15 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
        return ref.get();
    }

    /** Get the set of types that have a mapping for the given field. */
    public Set<String> getTypes(String field) {
        Set<String> types = fullNameToTypes.get(field);
        if (types == null) {
            types = Collections.emptySet();
        }
        return types;
    }

    /** Returns the field type for the given index name */
    public MappedFieldType getByIndexName(String field) {
        MappedFieldTypeReference ref = indexNameToFieldType.get(field);

@ -140,6 +199,15 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
        return ref.get();
    }

    /** Get the set of types that have a mapping for the given field. */
    public Set<String> getTypesByIndexName(String field) {
        Set<String> types = indexNameToTypes.get(field);
        if (types == null) {
            types = Collections.emptySet();
        }
        return types;
    }

    /**
     * Returns a list of the index names of a simple match regex like pattern against full name and index name.
     */

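The new `fullNameToTypes`/`indexNameToTypes` maps above let `checkCompatibility` decide per field whether to be strict: strict unless the caller updates all types or the field is mapped only in the type currently being updated. The `beStrict` decision, restated as a standalone sketch with example inputs:

---------------------------------------------------------------------------
// Illustrative only: the beStrict decision from the hunks above, in isolation.
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class StrictCheckSketch {
    static boolean beStrict(String type, Set<String> types, boolean updateAllTypes) {
        assert types.size() >= 1;
        if (updateAllTypes) {
            return false;
        } else if (types.size() == 1 && types.contains(type)) {
            // only this type maps the field, so we are implicitly updating all types
            return false;
        } else {
            return true;
        }
    }

    public static void main(String[] args) {
        Set<String> onlyThisType = Collections.singleton("blog");
        Set<String> twoTypes = new HashSet<>(Arrays.asList("blog", "comment"));
        System.out.println(beStrict("blog", onlyThisType, false));  // false
        System.out.println(beStrict("blog", twoTypes, false));      // true
        System.out.println(beStrict("blog", twoTypes, true));       // false
    }
}
---------------------------------------------------------------------------
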
@ -229,9 +229,9 @@ public abstract class MappedFieldType extends FieldType {
    public abstract String typeName();

    /** Checks this type is the same type as other. Adds a conflict if they are different. */
    public final void checkTypeName(MappedFieldType other, List<String> conflicts) {
    private final void checkTypeName(MappedFieldType other) {
        if (typeName().equals(other.typeName()) == false) {
            conflicts.add("mapper [" + names().fullName() + "] cannot be changed from type [" + typeName() + "] to [" + other.typeName() + "]");
            throw new IllegalArgumentException("mapper [" + names().fullName() + "] cannot be changed from type [" + typeName() + "] to [" + other.typeName() + "]");
        } else if (getClass() != other.getClass()) {
            throw new IllegalStateException("Type names equal for class " + getClass().getSimpleName() + " and " + other.getClass().getSimpleName());
        }

@ -243,6 +243,8 @@ public abstract class MappedFieldType extends FieldType {
     * Otherwise, only properties which must never change in an index are checked.
     */
    public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) {
        checkTypeName(other);

        boolean indexed = indexOptions() != IndexOptions.NONE;
        boolean mergeWithIndexed = other.indexOptions() != IndexOptions.NONE;
        // TODO: should be validating if index options go "up" (but "down" is ok)

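The `MappedFieldType` hunks above turn a type-name mismatch from an entry in the conflicts list into an immediate `IllegalArgumentException`, and `checkCompatibility` now performs that check first. A small sketch of the fail-fast-then-collect split, with hypothetical helper names and a made-up `sameAnalyzer` stand-in for the softer checks:

---------------------------------------------------------------------------
// Illustrative only: hard errors throw immediately, softer differences still
// accumulate in a conflicts list, mirroring the contract change above.
import java.util.ArrayList;
import java.util.List;

public class FailFastSketch {
    static void checkTypeName(String ours, String theirs) {
        if (!ours.equals(theirs)) {
            throw new IllegalArgumentException(
                "mapper cannot be changed from type [" + ours + "] to [" + theirs + "]");
        }
    }

    static List<String> checkCompatibility(String ours, String theirs, boolean sameAnalyzer) {
        checkTypeName(ours, theirs);                 // fail fast on the hard error
        List<String> conflicts = new ArrayList<>();  // softer differences still accumulate
        if (!sameAnalyzer) {
            conflicts.add("different analyzer");
        }
        return conflicts;
    }

    public static void main(String[] args) {
        System.out.println(checkCompatibility("string", "string", false)); // [different analyzer]
        try {
            checkCompatibility("string", "long", true);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
---------------------------------------------------------------------------
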
@ -23,12 +23,10 @@ package org.elasticsearch.index.mapper;
 */
public class MappedFieldTypeReference {
    private MappedFieldType fieldType; // the current field type this reference points to
    private int numAssociatedMappers;

    public MappedFieldTypeReference(MappedFieldType fieldType) {
        fieldType.freeze(); // ensure frozen
        this.fieldType = fieldType;
        this.numAssociatedMappers = 1;
    }

    public MappedFieldType get() {

@ -40,11 +38,4 @@ public class MappedFieldTypeReference {
        this.fieldType = fieldType;
    }

    public int getNumAssociatedMappers() {
        return numAssociatedMappers;
    }

    public void incrementAssociatedMappers() {
        ++numAssociatedMappers;
    }
}

Some files were not shown because too many files have changed in this diff.