merge master

commit 95e8a39b9b

@ -0,0 +1,10 @@
# EditorConfig: http://editorconfig.org/

root = true

[*.java]
charset = utf-8
indent_style = space
indent_size = 4
trim_trailing_whitespace = true
insert_final_newline = true
@ -149,17 +149,23 @@ gradle test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix)

=== Load balancing and caches.

By default, the tests run sequentially on a single forked JVM.

To run with more forked JVMs than the default use:
By default the tests run on up to 4 JVMs based on the number of cores. If you
want to explicitly specify the number of JVMs you can do so on the command
line:

----------------------------
gradle test -Dtests.jvms=8
----------------------------

Don't count hypercores for CPU-intense tests and leave some slack
for JVM-internal threads (like the garbage collector). Make sure there is
enough RAM to handle child JVMs.
Or in `~/.gradle/gradle.properties`:

----------------------------
systemProp.tests.jvms=8
----------------------------

It's difficult to pick the "right" number here. Hypercores don't count for CPU
intensive tests and you should leave some slack for JVM-internal threads like
the garbage collector. And you have to have enough RAM to handle each JVM.

=== Test compatibility.

@ -280,11 +286,20 @@ The REST layer is tested through specific tests that are shared between all
the elasticsearch official clients and consist of YAML files that describe the
operations to be executed and the obtained results that need to be tested.

The REST tests are run automatically when executing the maven test command. To run only the
The REST tests are run automatically when executing the "gradle check" command. To run only the
REST tests use the following command:

---------------------------------------------------------------------------
gradle integTest -Dtests.filter="@Rest"
gradle :distribution:tar:integTest \
  -Dtests.class=org.elasticsearch.test.rest.RestIT
---------------------------------------------------------------------------

A specific test case can be run with

---------------------------------------------------------------------------
gradle :distribution:tar:integTest \
  -Dtests.class=org.elasticsearch.test.rest.RestIT \
  -Dtests.method="test {p0=cat.shards/10_basic/Help}"
---------------------------------------------------------------------------

`RestNIT` are the executable test classes that run all the
18 build.gradle
@ -45,7 +45,7 @@ subprojects {
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
extraArchive {
|
||||
javadoc = true
|
||||
tests = false
|
||||
|
@ -86,8 +86,8 @@ subprojects {
|
|||
tasks.withType(Jar) {
|
||||
into('META-INF') {
|
||||
from project.rootProject.rootDir
|
||||
include 'LICENSE.txt'
|
||||
include 'NOTICE.txt'
|
||||
include 'LICENSE.txt'
|
||||
include 'NOTICE.txt'
|
||||
}
|
||||
}
|
||||
// ignore missing javadocs
|
||||
|
@ -101,12 +101,19 @@ subprojects {
|
|||
}
|
||||
}
|
||||
|
||||
/* Sets up the dependencies that we build as part of this project but
|
||||
register as thought they were external to resolve internally. We register
|
||||
them as external dependencies so the build plugin that we use can be used
|
||||
to build elasticsearch plugins outside of the elasticsearch source tree. */
|
||||
ext.projectSubstitutions = [
|
||||
"org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
|
||||
"org.elasticsearch:elasticsearch:${version}": ':core',
|
||||
"org.elasticsearch:test-framework:${version}": ':test-framework',
|
||||
"org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
|
||||
"org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
|
||||
"org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar'
|
||||
"org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
|
||||
"org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
|
||||
"org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
|
||||
]
|
||||
configurations.all {
|
||||
resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->
|
||||
|
@ -226,7 +233,7 @@ class Run extends DefaultTask {
|
|||
)
|
||||
public void setDebug(boolean enabled) {
|
||||
project.project(':distribution').run.clusterConfig.debug = enabled
|
||||
}
|
||||
}
|
||||
}
|
||||
task run(type: Run) {
|
||||
dependsOn ':distribution:run'
|
||||
|
@ -234,4 +241,3 @@ task run(type: Run) {
|
|||
group = 'Verification'
|
||||
impliesSubProjects = true
|
||||
}
|
||||
|
||||
|
|
|
@ -80,3 +80,13 @@ eclipse {
|
|||
defaultOutputDir = new File(file('build'), 'eclipse')
|
||||
}
|
||||
}
|
||||
|
||||
task copyEclipseSettings(type: Copy) {
|
||||
from project.file('src/main/resources/eclipse.settings')
|
||||
into '.settings'
|
||||
}
|
||||
// otherwise .settings is not nuked entirely
|
||||
tasks.cleanEclipse {
|
||||
delete '.settings'
|
||||
}
|
||||
tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.carrotsearch.gradle.junit4
|
||||
|
||||
import org.gradle.api.tasks.Input
|
||||
import org.gradle.util.ConfigureUtil
|
||||
|
||||
class TestLoggingConfiguration {
|
||||
|
@ -20,6 +21,10 @@ class TestLoggingConfiguration {
|
|||
SlowTestsConfiguration slowTests = new SlowTestsConfiguration()
|
||||
StackTraceFiltersConfiguration stackTraceFilters = new StackTraceFiltersConfiguration()
|
||||
|
||||
/** Summarize the first N failures at the end of the test. */
|
||||
@Input
|
||||
int showNumFailuresAtEnd = 3 // match TextReport default
|
||||
|
||||
void slowTests(Closure closure) {
|
||||
ConfigureUtil.configure(closure, slowTests)
|
||||
}
|
||||
|
@ -31,4 +36,8 @@ class TestLoggingConfiguration {
|
|||
void outputMode(String mode) {
|
||||
outputMode = mode.toUpperCase() as OutputMode
|
||||
}
|
||||
|
||||
void showNumFailuresAtEnd(int n) {
|
||||
showNumFailuresAtEnd = n
|
||||
}
|
||||
}
|
||||
|
|
|
@ -48,9 +48,6 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
|
|||
/** Format line for JVM ID string. */
|
||||
String jvmIdFormat
|
||||
|
||||
/** Summarize the first N failures at the end. */
|
||||
int showNumFailuresAtEnd = 3
|
||||
|
||||
/** Output stream that logs messages to the given logger */
|
||||
LoggingOutputStream outStream
|
||||
LoggingOutputStream errStream
|
||||
|
@ -110,13 +107,13 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
|
|||
|
||||
@Subscribe
|
||||
void onQuit(AggregatedQuitEvent e) throws IOException {
|
||||
if (showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) {
|
||||
if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) {
|
||||
List<Description> sublist = this.failedTests
|
||||
StringBuilder b = new StringBuilder()
|
||||
b.append('Tests with failures')
|
||||
if (sublist.size() > showNumFailuresAtEnd) {
|
||||
sublist = sublist.subList(0, showNumFailuresAtEnd)
|
||||
b.append(" (first " + showNumFailuresAtEnd + " out of " + failedTests.size() + ")")
|
||||
if (sublist.size() > config.showNumFailuresAtEnd) {
|
||||
sublist = sublist.subList(0, config.showNumFailuresAtEnd)
|
||||
b.append(" (first " + config.showNumFailuresAtEnd + " out of " + failedTests.size() + ")")
|
||||
}
|
||||
b.append(':\n')
|
||||
for (Description description : sublist) {
|
||||
|
|
|
@ -62,7 +62,7 @@ class BuildPlugin implements Plugin<Project> {
|
|||
configureCompile(project)
|
||||
|
||||
configureTest(project)
|
||||
PrecommitTasks.configure(project)
|
||||
configurePrecommit(project)
|
||||
}
|
||||
|
||||
/** Performs checks on the build environment and prints information about the build environment. */
|
||||
|
@ -283,16 +283,24 @@ class BuildPlugin implements Plugin<Project> {
|
|||
|
||||
/** Adds compiler settings to the project */
|
||||
static void configureCompile(Project project) {
|
||||
project.ext.compactProfile = 'compact3'
|
||||
project.afterEvaluate {
|
||||
// fail on all javac warnings
|
||||
project.tasks.withType(JavaCompile) {
|
||||
options.fork = true
|
||||
options.forkOptions.executable = new File(project.javaHome, 'bin/javac')
|
||||
options.forkOptions.memoryMaximumSize = "1g"
|
||||
/*
|
||||
* -path because gradle will send in paths that don't always exist.
|
||||
* -missing because we have tons of missing @returns and @param.
|
||||
*/
|
||||
// don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
|
||||
options.compilerArgs << '-Werror' << '-Xlint:all,-path' << '-Xdoclint:all' << '-Xdoclint:-missing'
|
||||
// compile with compact 3 profile by default
|
||||
// NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
|
||||
if (project.compactProfile != 'full') {
|
||||
options.compilerArgs << '-profile' << project.compactProfile
|
||||
}
|
||||
options.encoding = 'UTF-8'
|
||||
}
|
||||
}
|
||||
|
@ -363,6 +371,7 @@ class BuildPlugin implements Plugin<Project> {
|
|||
enableSystemAssertions false
|
||||
|
||||
testLogging {
|
||||
showNumFailuresAtEnd 25
|
||||
slowTests {
|
||||
heartbeat 10
|
||||
summarySize 5
|
||||
|
@ -407,4 +416,11 @@ class BuildPlugin implements Plugin<Project> {
|
|||
}
|
||||
return test
|
||||
}
|
||||
|
||||
private static configurePrecommit(Project project) {
|
||||
Task precommit = PrecommitTasks.create(project, true)
|
||||
project.check.dependsOn(precommit)
|
||||
project.test.mustRunAfter(precommit)
|
||||
project.dependencyLicenses.dependencies = project.configurations.runtime - project.configurations.provided
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,18 +17,26 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress.lzf;
|
||||
package org.elasticsearch.gradle
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.tasks.Exec
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
// LZF compressor with write support, for testing only
|
||||
public class LZFTestCompressor extends LZFCompressor {
|
||||
|
||||
@Override
|
||||
public StreamOutput streamOutput(StreamOutput out) throws IOException {
|
||||
return new LZFCompressedStreamOutput(out);
|
||||
/**
|
||||
* A wrapper around gradle's Exec task to capture output and log on error.
|
||||
*/
|
||||
class LoggedExec extends Exec {
|
||||
LoggedExec() {
|
||||
if (logger.isInfoEnabled() == false) {
|
||||
standardOutput = new ByteArrayOutputStream()
|
||||
errorOutput = standardOutput
|
||||
ignoreExitValue = true
|
||||
doLast {
|
||||
if (execResult.exitValue != 0) {
|
||||
standardOutput.toString('UTF-8').eachLine { line -> logger.error(line) }
|
||||
throw new GradleException("Process '${executable} ${args.join(' ')}' finished with non-zero exit value ${execResult.exitValue}")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
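
For context, a minimal sketch of how a build script could use this task type. The task name and command here are made up for illustration, and this assumes the build-tools classes are on the buildscript classpath:

---------------------------------------------------------------------------
import org.elasticsearch.gradle.LoggedExec

// When info logging is off, output is buffered and only dumped through
// logger.error if the process exits non-zero, after which the task fails.
task listPluginDir(type: LoggedExec) {
    executable = 'ls'
    args '-la', 'plugins'
}
---------------------------------------------------------------------------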
|
@ -23,40 +23,41 @@ import org.elasticsearch.gradle.test.RestIntegTestTask
|
|||
import org.elasticsearch.gradle.test.RunTask
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.Task
|
||||
import org.gradle.api.tasks.SourceSet
|
||||
import org.gradle.api.tasks.bundling.Zip
|
||||
|
||||
/**
|
||||
* Encapsulates build configuration for an Elasticsearch plugin.
|
||||
*/
|
||||
class PluginBuildPlugin extends BuildPlugin {
|
||||
public class PluginBuildPlugin extends BuildPlugin {
|
||||
|
||||
@Override
|
||||
void apply(Project project) {
|
||||
public void apply(Project project) {
|
||||
super.apply(project)
|
||||
configureDependencies(project)
|
||||
// this afterEvaluate must happen before the afterEvaluate added by integTest configure,
|
||||
// this afterEvaluate must happen before the afterEvaluate added by integTest creation,
|
||||
// so that the file name resolution for installing the plugin will be setup
|
||||
project.afterEvaluate {
|
||||
String name = project.pluginProperties.extension.name
|
||||
project.jar.baseName = name
|
||||
project.bundlePlugin.baseName = name
|
||||
|
||||
project.integTest.dependsOn(project.bundlePlugin)
|
||||
project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
|
||||
project.tasks.run.dependsOn(project.bundlePlugin)
|
||||
project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
|
||||
}
|
||||
RestIntegTestTask.configure(project)
|
||||
RunTask.configure(project)
|
||||
Task bundle = configureBundleTask(project)
|
||||
project.configurations.archives.artifacts.removeAll { it.archiveTask.is project.jar }
|
||||
project.configurations.getByName('default').extendsFrom = []
|
||||
project.artifacts {
|
||||
archives bundle
|
||||
'default' bundle
|
||||
if (project.path.startsWith(':modules:')) {
|
||||
project.integTest.clusterConfig.module(project)
|
||||
project.tasks.run.clusterConfig.module(project)
|
||||
} else {
|
||||
project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
|
||||
project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
|
||||
}
|
||||
}
|
||||
createIntegTestTask(project)
|
||||
createBundleTask(project)
|
||||
project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
|
||||
}
|
||||
|
||||
static void configureDependencies(Project project) {
|
||||
private static void configureDependencies(Project project) {
|
||||
project.dependencies {
|
||||
provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
|
||||
testCompile "org.elasticsearch:test-framework:${project.versions.elasticsearch}"
|
||||
|
@ -72,21 +73,36 @@ class PluginBuildPlugin extends BuildPlugin {
|
|||
}
|
||||
}
|
||||
|
||||
static Task configureBundleTask(Project project) {
|
||||
PluginPropertiesTask buildProperties = project.tasks.create(name: 'pluginProperties', type: PluginPropertiesTask)
|
||||
File pluginMetadata = project.file("src/main/plugin-metadata")
|
||||
project.sourceSets.test {
|
||||
output.dir(buildProperties.generatedResourcesDir, builtBy: 'pluginProperties')
|
||||
resources {
|
||||
srcDir pluginMetadata
|
||||
}
|
||||
}
|
||||
Task bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [project.jar, buildProperties])
|
||||
bundle.configure {
|
||||
from buildProperties
|
||||
from pluginMetadata
|
||||
from project.jar
|
||||
from bundle.project.configurations.runtime - bundle.project.configurations.provided
|
||||
/** Adds an integTest task which runs rest tests */
|
||||
private static void createIntegTestTask(Project project) {
|
||||
RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
|
||||
integTest.mustRunAfter(project.precommit, project.test)
|
||||
project.check.dependsOn(integTest)
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a bundlePlugin task which builds the zip containing the plugin jars,
|
||||
* metadata, properties, and packaging files
|
||||
*/
|
||||
private static void createBundleTask(Project project) {
|
||||
File pluginMetadata = project.file('src/main/plugin-metadata')
|
||||
|
||||
// create a task to build the properties file for this plugin
|
||||
PluginPropertiesTask buildProperties = project.tasks.create('pluginProperties', PluginPropertiesTask.class)
|
||||
|
||||
// add the plugin properties and metadata to test resources, so unit tests can
|
||||
// know about the plugin (used by test security code to statically initialize the plugin in unit tests)
|
||||
SourceSet testSourceSet = project.sourceSets.test
|
||||
testSourceSet.output.dir(buildProperties.generatedResourcesDir, builtBy: 'pluginProperties')
|
||||
testSourceSet.resources.srcDir(pluginMetadata)
|
||||
|
||||
// create the actual bundle task, which zips up all the files for the plugin
|
||||
Zip bundle = project.tasks.create(name: 'bundlePlugin', type: Zip, dependsOn: [project.jar, buildProperties]) {
|
||||
from buildProperties // plugin properties file
|
||||
from pluginMetadata // metadata (eg custom security policy)
|
||||
from project.jar // this plugin's jar
|
||||
from project.configurations.runtime - project.configurations.provided // the dep jars
|
||||
// extra files for the plugin to go into the zip
|
||||
from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging
|
||||
from('src/main') {
|
||||
include 'config/**'
|
||||
|
@ -97,6 +113,13 @@ class PluginBuildPlugin extends BuildPlugin {
|
|||
}
|
||||
}
|
||||
project.assemble.dependsOn(bundle)
|
||||
return bundle
|
||||
|
||||
// remove jar from the archives (things that will be published), and set it to the zip
|
||||
project.configurations.archives.artifacts.removeAll { it.archiveTask.is project.jar }
|
||||
project.artifacts.add('archives', bundle)
|
||||
|
||||
// also make the zip the default artifact (used when depending on this project)
|
||||
project.configurations.getByName('default').extendsFrom = []
|
||||
project.artifacts.add('default', bundle)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,64 +18,104 @@
|
|||
*/
|
||||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.InvalidUserDataException
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.Task
|
||||
import org.gradle.api.*
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.tasks.Input
|
||||
import org.gradle.api.tasks.InputDirectory
|
||||
import org.gradle.api.tasks.InputFiles
|
||||
import org.gradle.api.tasks.StopActionException
|
||||
import org.gradle.api.tasks.TaskAction
|
||||
import org.gradle.api.tasks.VerificationTask
|
||||
|
||||
import java.nio.file.Files
|
||||
import java.security.MessageDigest
|
||||
import java.util.regex.Matcher
|
||||
import java.util.regex.Pattern
|
||||
|
||||
class DependencyLicensesTask extends DefaultTask {
|
||||
/**
|
||||
* A task to check licenses for dependencies.
|
||||
*
|
||||
* There are two parts to the check:
|
||||
* <ul>
|
||||
* <li>LICENSE and NOTICE files</li>
|
||||
* <li>SHA checksums for each dependency jar</li>
|
||||
* </ul>
|
||||
*
|
||||
* The directory to find the license and sha files in defaults to the dir {@code licenses}
|
||||
* in the project directory for this task. You can override this directory:
|
||||
* <pre>
|
||||
* dependencyLicenses {
|
||||
* licensesDir = project.file('mybetterlicensedir')
|
||||
* }
|
||||
* </pre>
|
||||
*
|
||||
* The jar files to check default to the dependencies from the default configuration. You
|
||||
* can override this, for example, to only check compile dependencies:
|
||||
* <pre>
|
||||
* dependencyLicenses {
|
||||
* dependencies = project.configurations.compile
|
||||
* }
|
||||
* </pre>
|
||||
*
|
||||
* Every jar must have a {@code .sha1} file in the licenses dir. These can be managed
|
||||
* automatically using the {@code updateShas} helper task that is created along
|
||||
* with this task. It will add {@code .sha1} files for new jars that are in dependencies
|
||||
* and remove old {@code .sha1} files that are no longer needed.
|
||||
*
|
||||
* Every jar must also have a LICENSE and NOTICE file. However, multiple jars can share
|
||||
* LICENSE and NOTICE files by mapping a pattern to the same name.
|
||||
* <pre>
|
||||
* dependencyLicenses {
|
||||
* mapping from: /lucene-.*/, to: 'lucene'
|
||||
* }
|
||||
* </pre>
|
||||
*/
|
||||
public class DependencyLicensesTask extends DefaultTask {
|
||||
static final String SHA_EXTENSION = '.sha1'
|
||||
|
||||
static Task configure(Project project, Closure closure) {
|
||||
DependencyLicensesTask task = project.tasks.create(type: DependencyLicensesTask, name: 'dependencyLicenses')
|
||||
UpdateShasTask update = project.tasks.create(type: UpdateShasTask, name: 'updateShas')
|
||||
update.parentTask = task
|
||||
task.configure(closure)
|
||||
project.check.dependsOn(task)
|
||||
return task
|
||||
}
|
||||
|
||||
// TODO: we should be able to default this to eg compile deps, but we need to move the licenses
|
||||
// check from distribution to core (ie this should only be run on java projects)
|
||||
/** A collection of jar files that should be checked. */
|
||||
@InputFiles
|
||||
FileCollection dependencies
|
||||
public FileCollection dependencies
|
||||
|
||||
/** The directory to find the license and sha files in. */
|
||||
@InputDirectory
|
||||
File licensesDir = new File(project.projectDir, 'licenses')
|
||||
public File licensesDir = new File(project.projectDir, 'licenses')
|
||||
|
||||
LinkedHashMap<String, String> mappings = new LinkedHashMap<>()
|
||||
/** A map of patterns to prefix, used to find the LICENSE and NOTICE file. */
|
||||
private LinkedHashMap<String, String> mappings = new LinkedHashMap<>()
|
||||
|
||||
/**
|
||||
* Add a mapping from a regex pattern for the jar name, to a prefix to find
|
||||
* the LICENSE and NOTICE file for that jar.
|
||||
*/
|
||||
@Input
|
||||
void mapping(Map<String, String> props) {
|
||||
String from = props.get('from')
|
||||
public void mapping(Map<String, String> props) {
|
||||
String from = props.remove('from')
|
||||
if (from == null) {
|
||||
throw new InvalidUserDataException('Missing "from" setting for license name mapping')
|
||||
}
|
||||
String to = props.get('to')
|
||||
String to = props.remove('to')
|
||||
if (to == null) {
|
||||
throw new InvalidUserDataException('Missing "to" setting for license name mapping')
|
||||
}
|
||||
if (props.isEmpty() == false) {
|
||||
throw new InvalidUserDataException("Unknown properties for mapping on dependencyLicenses: ${props.keySet()}")
|
||||
}
|
||||
mappings.put(from, to)
|
||||
}
|
||||
|
||||
@TaskAction
|
||||
void checkDependencies() {
|
||||
// TODO: empty license dir (or error when dir exists and no deps)
|
||||
if (licensesDir.exists() == false && dependencies.isEmpty() == false) {
|
||||
public void checkDependencies() {
|
||||
if (dependencies.isEmpty()) {
|
||||
if (licensesDir.exists()) {
|
||||
throw new GradleException("Licenses dir ${licensesDir} exists, but there are no dependencies")
|
||||
}
|
||||
return // no dependencies to check
|
||||
} else if (licensesDir.exists() == false) {
|
||||
throw new GradleException("Licences dir ${licensesDir} does not exist, but there are dependencies")
|
||||
}
|
||||
|
||||
|
||||
// order is the same for keys and values iteration since we use a linked hashmap
|
||||
List<String> mapped = new ArrayList<>(mappings.values())
|
||||
Pattern mappingsPattern = Pattern.compile('(' + mappings.keySet().join(')|(') + ')')
|
||||
|
@ -127,7 +167,7 @@ class DependencyLicensesTask extends DefaultTask {
|
|||
}
|
||||
}
|
||||
|
||||
void checkSha(File jar, String jarName, Set<File> shaFiles) {
|
||||
private void checkSha(File jar, String jarName, Set<File> shaFiles) {
|
||||
File shaFile = new File(licensesDir, jarName + SHA_EXTENSION)
|
||||
if (shaFile.exists() == false) {
|
||||
throw new GradleException("Missing SHA for ${jarName}. Run 'gradle updateSHAs' to create")
|
||||
|
@ -143,7 +183,7 @@ class DependencyLicensesTask extends DefaultTask {
|
|||
shaFiles.remove(shaFile)
|
||||
}
|
||||
|
||||
void checkFile(String name, String jarName, Map<String, Integer> counters, String type) {
|
||||
private void checkFile(String name, String jarName, Map<String, Integer> counters, String type) {
|
||||
String fileName = "${name}-${type}"
|
||||
Integer count = counters.get(fileName)
|
||||
if (count == null) {
|
||||
|
@ -158,10 +198,12 @@ class DependencyLicensesTask extends DefaultTask {
|
|||
counters.put(fileName, count + 1)
|
||||
}
|
||||
|
||||
static class UpdateShasTask extends DefaultTask {
|
||||
DependencyLicensesTask parentTask
|
||||
/** A helper task to update the sha files in the license dir. */
|
||||
public static class UpdateShasTask extends DefaultTask {
|
||||
private DependencyLicensesTask parentTask
|
||||
|
||||
@TaskAction
|
||||
void updateShas() {
|
||||
public void updateShas() {
|
||||
Set<File> shaFiles = new HashSet<File>()
|
||||
parentTask.licensesDir.eachFile {
|
||||
String name = it.getName()
|
||||
|
|
|
@ -19,10 +19,11 @@
|
|||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.InvalidUserDataException
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.tasks.InputFiles
|
||||
import org.gradle.api.tasks.OutputFile
|
||||
import org.gradle.api.tasks.OutputFiles
|
||||
import org.gradle.api.tasks.SourceSet
|
||||
import org.gradle.api.tasks.TaskAction
|
||||
import org.gradle.api.tasks.util.PatternFilterable
|
||||
|
@ -33,14 +34,19 @@ import java.util.regex.Pattern
|
|||
/**
|
||||
* Checks for patterns in source files for the project which are forbidden.
|
||||
*/
|
||||
class ForbiddenPatternsTask extends DefaultTask {
|
||||
Map<String,String> patterns = new LinkedHashMap<>()
|
||||
PatternFilterable filesFilter = new PatternSet()
|
||||
public class ForbiddenPatternsTask extends DefaultTask {
|
||||
|
||||
/** The rules: a map from the rule name, to a rule regex pattern. */
|
||||
private Map<String,String> patterns = new LinkedHashMap<>()
|
||||
/** A pattern set of which files should be checked. */
|
||||
private PatternFilterable filesFilter = new PatternSet()
|
||||
|
||||
@OutputFile
|
||||
File outputMarker = new File(project.buildDir, "markers/forbiddenPatterns")
|
||||
|
||||
ForbiddenPatternsTask() {
|
||||
public ForbiddenPatternsTask() {
|
||||
description = 'Checks source files for invalid patterns like nocommits or tabs'
|
||||
|
||||
// we always include all source files, and exclude what should not be checked
|
||||
filesFilter.include('**')
|
||||
// exclude known binary extensions
|
||||
|
@ -52,23 +58,28 @@ class ForbiddenPatternsTask extends DefaultTask {
|
|||
filesFilter.exclude('**/*.crt')
|
||||
filesFilter.exclude('**/*.png')
|
||||
|
||||
// TODO: add compile and test compile outputs as this tasks outputs, so we don't rerun when source files haven't changed
|
||||
// add mandatory rules
|
||||
patterns.put('nocommit', /nocommit/)
|
||||
patterns.put('tab', /\t/)
|
||||
}
|
||||
|
||||
/** Adds a file glob pattern to be excluded */
|
||||
void exclude(String... excludes) {
|
||||
public void exclude(String... excludes) {
|
||||
this.filesFilter.exclude(excludes)
|
||||
}
|
||||
|
||||
/** Adds pattern to forbid */
|
||||
/** Adds a pattern to forbid. */
|
||||
void rule(Map<String,String> props) {
|
||||
String name = props.get('name')
|
||||
String name = props.remove('name')
|
||||
if (name == null) {
|
||||
throw new IllegalArgumentException('Missing [name] for invalid pattern rule')
|
||||
throw new InvalidUserDataException('Missing [name] for invalid pattern rule')
|
||||
}
|
||||
String pattern = props.get('pattern')
|
||||
String pattern = props.remove('pattern')
|
||||
if (pattern == null) {
|
||||
throw new IllegalArgumentException('Missing [pattern] for invalid pattern rule')
|
||||
throw new InvalidUserDataException('Missing [pattern] for invalid pattern rule')
|
||||
}
|
||||
if (props.isEmpty() == false) {
|
||||
throw new InvalidUserDataException("Unknown arguments for ForbiddenPatterns rule mapping: ${props.keySet()}")
|
||||
}
|
||||
// TODO: fail if pattern contains a newline, it won't work (currently)
|
||||
patterns.put(name, pattern)
|
||||
|
@ -89,14 +100,14 @@ class ForbiddenPatternsTask extends DefaultTask {
|
|||
Pattern allPatterns = Pattern.compile('(' + patterns.values().join(')|(') + ')')
|
||||
List<String> failures = new ArrayList<>()
|
||||
for (File f : files()) {
|
||||
f.eachLine('UTF-8') { line, lineNumber ->
|
||||
f.eachLine('UTF-8') { String line, int lineNumber ->
|
||||
if (allPatterns.matcher(line).find()) {
|
||||
addErrorMessages(failures, f, (String)line, (int)lineNumber)
|
||||
addErrorMessages(failures, f, line, lineNumber)
|
||||
}
|
||||
}
|
||||
}
|
||||
if (failures.isEmpty() == false) {
|
||||
throw new IllegalArgumentException('Found invalid patterns:\n' + failures.join('\n'))
|
||||
throw new GradleException('Found invalid patterns:\n' + failures.join('\n'))
|
||||
}
|
||||
outputMarker.setText('done', 'UTF-8')
|
||||
}
|
||||
|
|
|
@ -0,0 +1,62 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import org.elasticsearch.gradle.LoggedExec
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.tasks.InputFile
|
||||
import org.gradle.api.tasks.OutputFile
|
||||
|
||||
/**
|
||||
* Runs CheckJarHell on a classpath.
|
||||
*/
|
||||
public class JarHellTask extends LoggedExec {
|
||||
|
||||
/**
|
||||
* We use a simple "marker" file that we touch when the task succeeds
|
||||
* as the task output. This is compared against the modified time of the
|
||||
* inputs (ie the jars/class files).
|
||||
*/
|
||||
@OutputFile
|
||||
public File successMarker = new File(project.buildDir, 'markers/jarHell')
|
||||
|
||||
/** The classpath to run jarhell check on, defaults to the test runtime classpath */
|
||||
@InputFile
|
||||
public FileCollection classpath = project.sourceSets.test.runtimeClasspath
|
||||
|
||||
public JarHellTask() {
|
||||
project.afterEvaluate {
|
||||
dependsOn(classpath)
|
||||
description = "Runs CheckJarHell on ${classpath}"
|
||||
executable = new File(project.javaHome, 'bin/java')
|
||||
doFirst({
|
||||
/* JarHell doesn't like getting directories that don't exist but
|
||||
gradle isn't especially careful about that. So we have to
|
||||
filter it ourselves. */
|
||||
FileCollection taskClasspath = classpath.filter { it.exists() }
|
||||
args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell')
|
||||
})
|
||||
doLast({
|
||||
successMarker.parentFile.mkdirs()
|
||||
successMarker.setText("", 'UTF-8')
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
|
@ -18,16 +18,10 @@
|
|||
*/
|
||||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
|
||||
import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApisExtension
|
||||
import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.Task
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.plugins.JavaBasePlugin
|
||||
import org.gradle.api.tasks.Exec
|
||||
import org.gradle.api.tasks.TaskContainer
|
||||
|
||||
/**
|
||||
* Validation tasks which should be run before committing. These run before tests.
|
||||
|
@ -35,36 +29,34 @@ import org.gradle.api.tasks.TaskContainer
|
|||
class PrecommitTasks {
|
||||
|
||||
/** Adds a precommit task, which depends on non-test verification tasks. */
|
||||
static void configure(Project project) {
|
||||
List precommitTasks = [
|
||||
configureForbiddenApis(project),
|
||||
configureForbiddenPatterns(project.tasks),
|
||||
configureJarHell(project)]
|
||||
public static Task create(Project project, boolean includeDependencyLicenses) {
|
||||
|
||||
Map precommitOptions = [
|
||||
name: 'precommit',
|
||||
group: JavaBasePlugin.VERIFICATION_GROUP,
|
||||
description: 'Runs all non-test checks.',
|
||||
dependsOn: precommitTasks
|
||||
]
|
||||
Task precommit = project.tasks.create(precommitOptions)
|
||||
project.check.dependsOn(precommit)
|
||||
List<Task> precommitTasks = [
|
||||
configureForbiddenApis(project),
|
||||
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
|
||||
project.tasks.create('jarHell', JarHellTask.class)]
|
||||
|
||||
// delay ordering relative to test tasks, since they may not be setup yet
|
||||
project.afterEvaluate {
|
||||
Task test = project.tasks.findByName('test')
|
||||
if (test != null) {
|
||||
test.mustRunAfter(precommit)
|
||||
}
|
||||
Task integTest = project.tasks.findByName('integTest')
|
||||
if (integTest != null) {
|
||||
integTest.mustRunAfter(precommit)
|
||||
}
|
||||
// tasks with just tests don't need dependency licenses, so this flag makes adding
|
||||
// the task optional
|
||||
if (includeDependencyLicenses) {
|
||||
DependencyLicensesTask dependencyLicenses = project.tasks.create('dependencyLicenses', DependencyLicensesTask.class)
|
||||
precommitTasks.add(dependencyLicenses)
|
||||
// we also create the updateShas helper task that is associated with dependencyLicenses
|
||||
UpdateShasTask updateShas = project.tasks.create('updateShas', UpdateShasTask.class)
|
||||
updateShas.parentTask = dependencyLicenses
|
||||
}
|
||||
|
||||
Map<String, Object> precommitOptions = [
|
||||
name: 'precommit',
|
||||
group: JavaBasePlugin.VERIFICATION_GROUP,
|
||||
description: 'Runs all non-test checks.',
|
||||
dependsOn: precommitTasks
|
||||
]
|
||||
return project.tasks.create(precommitOptions)
|
||||
}
|
||||
|
||||
static Task configureForbiddenApis(Project project) {
|
||||
project.pluginManager.apply('de.thetaphi.forbiddenapis')
|
||||
private static Task configureForbiddenApis(Project project) {
|
||||
project.pluginManager.apply(ForbiddenApisPlugin.class)
|
||||
project.forbiddenApis {
|
||||
internalRuntimeForbidden = true
|
||||
failOnUnsupportedJava = false
|
||||
|
@ -75,72 +67,18 @@ class PrecommitTasks {
|
|||
Task mainForbidden = project.tasks.findByName('forbiddenApisMain')
|
||||
if (mainForbidden != null) {
|
||||
mainForbidden.configure {
|
||||
bundledSignatures += ['jdk-system-out']
|
||||
signaturesURLs += [
|
||||
getClass().getResource('/forbidden/core-signatures.txt'),
|
||||
getClass().getResource('/forbidden/third-party-signatures.txt')]
|
||||
bundledSignatures += 'jdk-system-out'
|
||||
signaturesURLs += getClass().getResource('/forbidden/core-signatures.txt')
|
||||
}
|
||||
}
|
||||
Task testForbidden = project.tasks.findByName('forbiddenApisTest')
|
||||
if (testForbidden != null) {
|
||||
testForbidden.configure {
|
||||
signaturesURLs += [getClass().getResource('/forbidden/test-signatures.txt')]
|
||||
signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt')
|
||||
}
|
||||
}
|
||||
Task forbiddenApis = project.tasks.findByName('forbiddenApis')
|
||||
forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
|
||||
return forbiddenApis
|
||||
}
|
||||
|
||||
static Task configureForbiddenPatterns(TaskContainer tasks) {
|
||||
Map options = [
|
||||
name: 'forbiddenPatterns',
|
||||
type: ForbiddenPatternsTask,
|
||||
description: 'Checks source files for invalid patterns like nocommits or tabs',
|
||||
]
|
||||
return tasks.create(options) {
|
||||
rule name: 'nocommit', pattern: /nocommit/
|
||||
rule name: 'tab', pattern: /\t/
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a task to run jar hell before on the test classpath.
|
||||
*
|
||||
* We use a simple "marker" file that we touch when the task succeeds
|
||||
* as the task output. This is compared against the modified time of the
|
||||
* inputs (ie the jars/class files).
|
||||
*/
|
||||
static Task configureJarHell(Project project) {
|
||||
File successMarker = new File(project.buildDir, 'markers/jarHell')
|
||||
Exec task = project.tasks.create(name: 'jarHell', type: Exec)
|
||||
FileCollection testClasspath = project.sourceSets.test.runtimeClasspath
|
||||
task.dependsOn(testClasspath)
|
||||
task.inputs.files(testClasspath)
|
||||
task.outputs.file(successMarker)
|
||||
task.executable = new File(project.javaHome, 'bin/java')
|
||||
task.doFirst({
|
||||
/* JarHell doesn't like getting directories that don't exist but
|
||||
gradle isn't especially careful about that. So we have to do it
|
||||
filter it ourselves. */
|
||||
def taskClasspath = testClasspath.filter { it.exists() }
|
||||
task.args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell')
|
||||
})
|
||||
if (task.logger.isInfoEnabled() == false) {
|
||||
task.standardOutput = new ByteArrayOutputStream()
|
||||
task.errorOutput = task.standardOutput
|
||||
task.ignoreExitValue = true
|
||||
task.doLast({
|
||||
if (execResult.exitValue != 0) {
|
||||
logger.error(standardOutput.toString())
|
||||
throw new GradleException("JarHell failed")
|
||||
}
|
||||
})
|
||||
}
|
||||
task.doLast({
|
||||
successMarker.parentFile.mkdirs()
|
||||
successMarker.setText("", 'UTF-8')
|
||||
})
|
||||
return task
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,66 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.tasks.TaskAction
|
||||
|
||||
import java.nio.file.Files
|
||||
import java.security.MessageDigest
|
||||
|
||||
/**
|
||||
* A task to update shas used by {@code DependencyLicensesCheck}
|
||||
*/
|
||||
public class UpdateShasTask extends DefaultTask {
|
||||
|
||||
/** The parent dependency licenses task to use configuration from */
|
||||
public DependencyLicensesTask parentTask
|
||||
|
||||
public UpdateShasTask() {
|
||||
description = 'Updates the sha files for the dependencyLicenses check'
|
||||
onlyIf { parentTask.licensesDir.exists() }
|
||||
}
|
||||
|
||||
@TaskAction
|
||||
public void updateShas() {
|
||||
Set<File> shaFiles = new HashSet<File>()
|
||||
parentTask.licensesDir.eachFile {
|
||||
String name = it.getName()
|
||||
if (name.endsWith(DependencyLicensesTask.SHA_EXTENSION)) {
|
||||
shaFiles.add(it)
|
||||
}
|
||||
}
|
||||
for (File dependency : parentTask.dependencies) {
|
||||
String jarName = dependency.getName()
|
||||
File shaFile = new File(parentTask.licensesDir, jarName + DependencyLicensesTask.SHA_EXTENSION)
|
||||
if (shaFile.exists() == false) {
|
||||
logger.lifecycle("Adding sha for ${jarName}")
|
||||
String sha = MessageDigest.getInstance("SHA-1").digest(dependency.getBytes()).encodeHex().toString()
|
||||
shaFile.setText(sha, 'UTF-8')
|
||||
} else {
|
||||
shaFiles.remove(shaFile)
|
||||
}
|
||||
}
|
||||
shaFiles.each { shaFile ->
|
||||
logger.lifecycle("Removing unused sha ${shaFile.getName()}")
|
||||
Files.delete(shaFile.toPath())
|
||||
}
|
||||
}
|
||||
}
|
|
@ -18,6 +18,8 @@
|
|||
*/
|
||||
package org.elasticsearch.gradle.test
|
||||
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.tasks.Input
|
||||
|
||||
|
@ -25,7 +27,7 @@ import org.gradle.api.tasks.Input
|
|||
class ClusterConfiguration {
|
||||
|
||||
@Input
|
||||
String distribution = 'zip'
|
||||
String distribution = 'integ-test-zip'
|
||||
|
||||
@Input
|
||||
int numNodes = 1
|
||||
|
@ -64,7 +66,12 @@ class ClusterConfiguration {
|
|||
|
||||
Map<String, String> settings = new HashMap<>()
|
||||
|
||||
LinkedHashMap<String, FileCollection> plugins = new LinkedHashMap<>()
|
||||
// map from destination path, to source file
|
||||
Map<String, Object> extraConfigFiles = new HashMap<>()
|
||||
|
||||
LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()
|
||||
|
||||
List<Project> modules = new ArrayList<>()
|
||||
|
||||
LinkedHashMap<String, Object[]> setupCommands = new LinkedHashMap<>()
|
||||
|
||||
|
@ -83,8 +90,31 @@ class ClusterConfiguration {
|
|||
plugins.put(name, file)
|
||||
}
|
||||
|
||||
@Input
|
||||
void plugin(String name, Project pluginProject) {
|
||||
plugins.put(name, pluginProject)
|
||||
}
|
||||
|
||||
/** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */
|
||||
@Input
|
||||
void module(Project moduleProject) {
|
||||
modules.add(moduleProject)
|
||||
}
|
||||
|
||||
@Input
|
||||
void setupCommand(String name, Object... args) {
|
||||
setupCommands.put(name, args)
|
||||
}
|
||||
|
||||
/**
|
||||
* Add an extra configuration file. The path is relative to the config dir, and the sourceFile
|
||||
* is anything accepted by project.file()
|
||||
*/
|
||||
@Input
|
||||
void extraConfigFile(String path, Object sourceFile) {
|
||||
if (path == 'elasticsearch.yml') {
|
||||
throw new GradleException('Overwriting elasticsearch.yml is not allowed, add additional settings using cluster { setting "foo", "bar" }')
|
||||
}
|
||||
extraConfigFiles.put(path, sourceFile)
|
||||
}
|
||||
}
|
||||
|
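
As a rough illustration of the new hooks above (plugin, module, setupCommand, extraConfigFile), an integration-test build script might configure its cluster along these lines. The project path, file names, and command are hypothetical, and this assumes the integTest task exposes this configuration through a cluster { } block:

---------------------------------------------------------------------------
integTest {
    cluster {
        distribution = 'integ-test-zip'   // also accepts zip, tar, rpm, deb
        numNodes = 1
        // install a plugin built elsewhere in this source tree
        plugin 'my-plugin', project(':plugins:my-plugin')
        // run an arbitrary command against the unpacked distribution
        setupCommand 'installDummyUser', 'bin/shield/esusers', 'useradd', 'test_user', '-p', 'changeme'
        // drop an extra file into the node's config directory
        extraConfigFile 'hunspell/en_US/en_US.dic', 'src/test/resources/hunspell/en_US.dic'
    }
}
---------------------------------------------------------------------------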
|
|
@ -20,13 +20,14 @@ package org.elasticsearch.gradle.test
|
|||
|
||||
import org.apache.tools.ant.DefaultLogger
|
||||
import org.apache.tools.ant.taskdefs.condition.Os
|
||||
import org.elasticsearch.gradle.LoggedExec
|
||||
import org.elasticsearch.gradle.VersionProperties
|
||||
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
|
||||
import org.gradle.api.*
|
||||
import org.gradle.api.artifacts.Configuration
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.logging.Logger
|
||||
import org.gradle.api.tasks.Copy
|
||||
import org.gradle.api.tasks.Delete
|
||||
import org.gradle.api.tasks.Exec
|
||||
import org.gradle.api.tasks.*
|
||||
|
||||
import java.nio.file.Paths
|
||||
|
||||
|
@ -59,7 +60,12 @@ class ClusterFormationTasks {
|
|||
/** Adds a dependency on the given distribution */
|
||||
static void configureDistributionDependency(Project project, String distro) {
|
||||
String elasticsearchVersion = VersionProperties.elasticsearch
|
||||
String packaging = distro == 'tar' ? 'tar.gz' : distro
|
||||
String packaging = distro
|
||||
if (distro == 'tar') {
|
||||
packaging = 'tar.gz'
|
||||
} else if (distro == 'integ-test-zip') {
|
||||
packaging = 'zip'
|
||||
}
|
||||
project.configurations {
|
||||
elasticsearchDistro
|
||||
}
|
||||
|
@ -99,17 +105,19 @@ class ClusterFormationTasks {
|
|||
setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
|
||||
setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node)
|
||||
setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node)
|
||||
setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
|
||||
setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
|
||||
|
||||
// install modules
|
||||
for (Project module : node.config.modules) {
|
||||
String actionName = pluginTaskName('install', module.name, 'Module')
|
||||
setup = configureInstallModuleTask(taskName(task, node, actionName), project, setup, node, module)
|
||||
}
|
||||
|
||||
// install plugins
|
||||
for (Map.Entry<String, FileCollection> plugin : node.config.plugins.entrySet()) {
|
||||
// replace every dash followed by a character with just the uppercase character
|
||||
String camelName = plugin.getKey().replaceAll(/-(\w)/) { _, c -> c.toUpperCase(Locale.ROOT) }
|
||||
String actionName = "install${camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1)}Plugin"
|
||||
// delay reading the file location until execution time by wrapping in a closure within a GString
|
||||
String file = "${-> new File(node.pluginsTmpDir, plugin.getValue().singleFile.getName()).toURI().toURL().toString()}"
|
||||
Object[] args = [new File(node.homeDir, 'bin/plugin'), 'install', file]
|
||||
setup = configureExecTask(taskName(task, node, actionName), project, setup, node, args)
|
||||
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
|
||||
String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
|
||||
setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
|
||||
}
|
||||
|
||||
// extra setup commands
|
||||
|
@ -133,8 +141,15 @@ class ClusterFormationTasks {
|
|||
/** Adds a task to extract the elasticsearch distribution */
|
||||
static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node) {
|
||||
List extractDependsOn = [project.configurations.elasticsearchDistro, setup]
|
||||
/* project.configurations.elasticsearchDistro.singleFile will be an
|
||||
external artifact if this is being run by a plugin not living in the
|
||||
elasticsearch source tree. If this is a plugin built in the
|
||||
elasticsearch source tree or this is a distro in the elasticsearch
|
||||
source tree then this should be the version of elasticsearch built
|
||||
by the source tree. If it isn't then Bad Things(TM) will happen. */
|
||||
Task extract
|
||||
switch (node.config.distribution) {
|
||||
case 'integ-test-zip':
|
||||
case 'zip':
|
||||
extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) {
|
||||
from { project.zipTree(project.configurations.elasticsearchDistro.singleFile) }
|
||||
|
@ -149,6 +164,33 @@ class ClusterFormationTasks {
|
|||
into node.baseDir
|
||||
}
|
||||
break;
|
||||
case 'rpm':
|
||||
File rpmDatabase = new File(node.baseDir, 'rpm-database')
|
||||
File rpmExtracted = new File(node.baseDir, 'rpm-extracted')
|
||||
/* Delay reading the location of the rpm file until task execution */
|
||||
Object rpm = "${ -> project.configurations.elasticsearchDistro.singleFile}"
|
||||
extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
|
||||
commandLine 'rpm', '--badreloc', '--nodeps', '--noscripts', '--notriggers',
|
||||
'--dbpath', rpmDatabase,
|
||||
'--relocate', "/=${rpmExtracted}",
|
||||
'-i', rpm
|
||||
doFirst {
|
||||
rpmDatabase.deleteDir()
|
||||
rpmExtracted.deleteDir()
|
||||
}
|
||||
}
|
||||
break;
|
||||
case 'deb':
|
||||
/* Delay reading the location of the deb file until task execution */
|
||||
File debExtracted = new File(node.baseDir, 'deb-extracted')
|
||||
Object deb = "${ -> project.configurations.elasticsearchDistro.singleFile}"
|
||||
extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
|
||||
commandLine 'dpkg-deb', '-x', deb, debExtracted
|
||||
doFirst {
|
||||
debExtracted.deleteDir()
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
throw new InvalidUserDataException("Unknown distribution: ${node.config.distribution}")
|
||||
}
|
||||
|
@ -169,29 +211,123 @@ class ClusterFormationTasks {
|
|||
'node.testattr' : 'test',
|
||||
'repositories.url.allowed_urls' : 'http://snapshot.test*'
|
||||
]
|
||||
esConfig.putAll(node.config.settings)
|
||||
|
||||
return project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) << {
|
||||
File configFile = new File(node.homeDir, 'config/elasticsearch.yml')
|
||||
Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
|
||||
writeConfig.doFirst {
|
||||
File configFile = new File(node.confDir, 'elasticsearch.yml')
|
||||
logger.info("Configuring ${configFile}")
|
||||
configFile.setText(esConfig.collect { key, value -> "${key}: ${value}" }.join('\n'), 'UTF-8')
|
||||
}
|
||||
}
|
||||
|
||||
/** Adds a task to copy plugins to a temp dir, which they will later be installed from. */
|
||||
static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) {
|
||||
if (node.config.extraConfigFiles.isEmpty()) {
|
||||
return setup
|
||||
}
|
||||
Copy copyConfig = project.tasks.create(name: name, type: Copy, dependsOn: setup)
|
||||
copyConfig.into(new File(node.homeDir, 'config')) // copy must always have a general dest dir, even though we don't use it
|
||||
for (Map.Entry<String,Object> extraConfigFile : node.config.extraConfigFiles.entrySet()) {
|
||||
copyConfig.doFirst {
|
||||
// make sure the copy won't be a no-op or act on a directory
|
||||
File srcConfigFile = project.file(extraConfigFile.getValue())
|
||||
if (srcConfigFile.isDirectory()) {
|
||||
throw new GradleException("Source for extraConfigFile must be a file: ${srcConfigFile}")
|
||||
}
|
||||
if (srcConfigFile.exists() == false) {
|
||||
throw new GradleException("Source file for extraConfigFile does not exist: ${srcConfigFile}")
|
||||
}
|
||||
}
|
||||
File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
|
||||
copyConfig.into(destConfigFile.canonicalFile.parentFile)
|
||||
.from({ extraConfigFile.getValue() }) // wrap in closure to delay resolution to execution time
|
||||
.rename { destConfigFile.name }
|
||||
}
|
||||
return copyConfig
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a task to copy plugins to a temp dir, which they will later be installed from.
|
||||
*
|
||||
* For each plugin, if the plugin has rest spec apis in its tests, those api files are also copied
|
||||
* to the test resources for this project.
|
||||
*/
|
||||
static Task configureCopyPluginsTask(String name, Project project, Task setup, NodeInfo node) {
|
||||
if (node.config.plugins.isEmpty()) {
|
||||
return setup
|
||||
}
|
||||
Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)
|
||||
|
||||
return project.tasks.create(name: name, type: Copy, dependsOn: setup) {
|
||||
into node.pluginsTmpDir
|
||||
from(node.config.plugins.values())
|
||||
List<FileCollection> pluginFiles = []
|
||||
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
|
||||
FileCollection pluginZip
|
||||
if (plugin.getValue() instanceof Project) {
|
||||
Project pluginProject = plugin.getValue()
|
||||
if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
|
||||
throw new GradleException("Task ${name} cannot include project ${pluginProject.path} which is not an esplugin")
|
||||
}
|
||||
String configurationName = "_plugin_${pluginProject.path}"
|
||||
Configuration configuration = project.configurations.findByName(configurationName)
|
||||
if (configuration == null) {
|
||||
configuration = project.configurations.create(configurationName)
|
||||
}
|
||||
project.dependencies.add(configurationName, pluginProject)
|
||||
setup.dependsOn(pluginProject.tasks.bundlePlugin)
|
||||
pluginZip = configuration
|
||||
|
||||
// also allow rest tests to use the rest spec from the plugin
|
||||
Copy copyRestSpec = null
|
||||
for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
|
||||
File restApiDir = new File(resourceDir, 'rest-api-spec/api')
|
||||
if (restApiDir.exists() == false) continue
|
||||
if (copyRestSpec == null) {
|
||||
copyRestSpec = project.tasks.create(name: pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec'), type: Copy)
|
||||
copyPlugins.dependsOn(copyRestSpec)
|
||||
copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
|
||||
}
|
||||
copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
|
||||
}
|
||||
} else {
|
||||
pluginZip = plugin.getValue()
|
||||
}
|
||||
pluginFiles.add(pluginZip)
|
||||
}
|
||||
|
||||
copyPlugins.into(node.pluginsTmpDir)
|
||||
copyPlugins.from(pluginFiles)
|
||||
return copyPlugins
|
||||
}
|
||||
|
||||
static Task configureInstallModuleTask(String name, Project project, Task setup, NodeInfo node, Project module) {
|
||||
if (node.config.distribution != 'integ-test-zip') {
|
||||
throw new GradleException("Module ${module.path} not allowed to be installed in distributions other than integ-test-zip because they should already have all modules bundled!")
|
||||
}
|
||||
if (module.plugins.hasPlugin(PluginBuildPlugin) == false) {
|
||||
throw new GradleException("Task ${name} cannot include module ${module.path} which is not an esplugin")
|
||||
}
|
||||
Copy installModule = project.tasks.create(name, Copy.class)
|
||||
installModule.dependsOn(setup)
|
||||
installModule.into(new File(node.homeDir, "modules/${module.name}"))
|
||||
installModule.from({ project.zipTree(module.tasks.bundlePlugin.outputs.files.singleFile) })
|
||||
return installModule
|
||||
}
|
||||
|
||||
static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Object plugin) {
|
||||
FileCollection pluginZip
|
||||
if (plugin instanceof Project) {
|
||||
pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
|
||||
} else {
|
||||
pluginZip = plugin
|
||||
}
|
||||
// delay reading the file location until execution time by wrapping in a closure within a GString
|
||||
String file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
|
||||
Object[] args = [new File(node.homeDir, 'bin/plugin'), 'install', file]
|
||||
return configureExecTask(name, project, setup, node, args)
|
||||
}
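
The lazy-GString trick used above is worth a closer look: embedding a closure in a GString defers evaluation until the string is actually rendered, so the plugin file name is read at execution time rather than at configuration time. A standalone Groovy sketch (not part of the build) of the same idea:

---------------------------------------------------------------------------
// The closure inside the GString is re-evaluated every time the string is
// converted to text, so it always sees the current value of `dir`.
def dir = new File('build/tmp')
String lazyPath = "${ -> new File(dir, 'plugin.zip').toURI().toURL() }"
dir = new File('build/other')   // a later reassignment...
println lazyPath                // ...is picked up here, at render time
---------------------------------------------------------------------------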
|
||||
|
||||
/** Adds a task to execute a command to help setup the cluster */
|
||||
static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) {
|
||||
return project.tasks.create(name: name, type: Exec, dependsOn: setup) {
|
||||
return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) {
|
||||
workingDir node.cwd
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
executable 'cmd'
|
||||
|
@ -200,35 +336,32 @@ class ClusterFormationTasks {
|
|||
executable 'sh'
|
||||
}
|
||||
args execArgs
|
||||
// only show output on failure, when not in info or debug mode
|
||||
if (logger.isInfoEnabled() == false) {
|
||||
standardOutput = new ByteArrayOutputStream()
|
||||
errorOutput = standardOutput
|
||||
ignoreExitValue = true
|
||||
doLast {
|
||||
if (execResult.exitValue != 0) {
|
||||
logger.error(standardOutput.toString())
|
||||
throw new GradleException("Process '${execArgs.join(' ')}' finished with non-zero exit value ${execResult.exitValue}")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Adds a task to start an elasticsearch node with the given configuration */
|
||||
static Task configureStartTask(String name, Project project, Task setup, NodeInfo node) {
|
||||
String executable
|
||||
List<String> esArgs = []
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
executable = 'cmd'
|
||||
esArgs.add('/C')
|
||||
esArgs.add('call')
|
||||
} else {
|
||||
executable = 'sh'
|
||||
}
|
||||
|
||||
// this closure is converted into ant nodes by groovy's AntBuilder
|
||||
Closure antRunner = { AntBuilder ant ->
|
||||
ant.exec(executable: node.executable, spawn: node.config.daemonize, dir: node.cwd, taskname: 'elasticsearch') {
|
||||
node.env.each { key, value -> env(key: key, value: value) }
|
||||
node.args.each { arg(value: it) }
|
||||
}
|
||||
}
|
||||
|
||||
// this closure is the actual code to run elasticsearch
|
||||
Closure elasticsearchRunner = {
|
||||
// Due to how ant exec works with the spawn option, we lose all stdout/stderr from the
|
||||
// process executed. To work around this, when spawning, we wrap the elasticsearch start
|
||||
// command inside another shell script, which simply internally redirects the output
|
||||
// of the real elasticsearch script. This allows ant to keep the streams open with the
|
||||
// dummy process, but us to have the output available if there is an error in the
|
||||
// elasticsearch start script
|
||||
if (node.config.daemonize) {
|
||||
node.writeWrapperScript()
|
||||
}
|
||||
|
||||
// we must add debug options inside the closure so the config is read at execution time, as
|
||||
// gradle task options are not processed until the end of the configuration phase
|
||||
if (node.config.debug) {
|
||||
|
@ -236,37 +369,6 @@ class ClusterFormationTasks {
|
|||
node.env['JAVA_OPTS'] = '-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000'
|
||||
}
|
||||
|
||||
// Due to how ant exec works with the spawn option, we lose all stdout/stderr from the
|
||||
// process executed. To work around this, when spawning, we wrap the elasticsearch start
|
||||
// command inside another shell script, which simply internally redirects the output
|
||||
// of the real elasticsearch script. This allows ant to keep the streams open with the
|
||||
// dummy process, but us to have the output available if there is an error in the
|
||||
// elasticsearch start script
|
||||
String script = node.esScript
|
||||
if (node.config.daemonize) {
|
||||
String scriptName = 'run'
|
||||
String argsPasser = '"$@"'
|
||||
String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
scriptName += '.bat'
|
||||
argsPasser = '%*'
|
||||
exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
|
||||
}
|
||||
File wrapperScript = new File(node.cwd, scriptName)
|
||||
wrapperScript.setText("\"${script}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
|
||||
script = wrapperScript.toString()
|
||||
}
|
||||
|
||||
ant.exec(executable: executable, spawn: node.config.daemonize, dir: node.cwd, taskname: 'elasticsearch') {
|
||||
node.env.each { key, value -> env(key: key, value: value) }
|
||||
arg(value: script)
|
||||
node.args.each { arg(value: it) }
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// this closure is the actual code to run elasticsearch
|
||||
Closure elasticsearchRunner = {
|
||||
node.getCommandString().eachLine { line -> logger.info(line) }
|
||||
|
||||
if (logger.isInfoEnabled() || node.config.daemonize == false) {
|
||||
|
@ -338,14 +440,19 @@ class ClusterFormationTasks {
|
|||
// We already log the command at info level. No need to do it twice.
|
||||
node.getCommandString().eachLine { line -> logger.error(line) }
|
||||
}
|
||||
// the waitfor failed, so dump any output we got (may be empty if info logging, but that is ok)
|
||||
logger.error("Node ${node.nodeNum} ant output:")
|
||||
node.buffer.toString('UTF-8').eachLine { line -> logger.error(line) }
|
||||
logger.error("Node ${node.nodeNum} output:")
|
||||
logger.error("|-----------------------------------------")
|
||||
logger.error("| failure marker exists: ${node.failedMarker.exists()}")
|
||||
logger.error("| pid file exists: ${node.pidFile.exists()}")
|
||||
// the waitfor failed, so dump any output we got (if info logging this goes directly to stdout)
|
||||
logger.error("|\n| [ant output]")
|
||||
node.buffer.toString('UTF-8').eachLine { line -> logger.error("| ${line}") }
|
||||
// also dump the log file for the startup script (which will include ES logging output to stdout)
|
||||
if (node.startLog.exists()) {
|
||||
logger.error("Node ${node.nodeNum} log:")
|
||||
node.startLog.eachLine { line -> logger.error(line) }
|
||||
logger.error("|\n| [log]")
|
||||
node.startLog.eachLine { line -> logger.error("| ${line}") }
|
||||
}
|
||||
logger.error("|-----------------------------------------")
|
||||
}
|
||||
throw new GradleException(msg)
|
||||
}
|
||||
|
@ -389,7 +496,7 @@ class ClusterFormationTasks {
|
|||
|
||||
/** Adds a task to kill an elasticsearch node with the given pidfile */
|
||||
static Task configureStopTask(String name, Project project, Object depends, NodeInfo node) {
|
||||
return project.tasks.create(name: name, type: Exec, dependsOn: depends) {
|
||||
return project.tasks.create(name: name, type: LoggedExec, dependsOn: depends) {
|
||||
onlyIf { node.pidFile.exists() }
|
||||
// the pid file won't actually be read until execution time, since the read is wrapped within an inner closure of the GString
|
||||
ext.pid = "${ -> node.pidFile.getText('UTF-8').trim()}"
|
||||
|
@ -418,6 +525,12 @@ class ClusterFormationTasks {
|
|||
}
|
||||
}
|
||||
|
||||
static String pluginTaskName(String action, String name, String suffix) {
|
||||
// replace every dash followed by a character with just the uppercase character
|
||||
String camelName = name.replaceAll(/-(\w)/) { _, c -> c.toUpperCase(Locale.ROOT) }
|
||||
return action + camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1) + suffix
|
||||
}
|
||||
|
||||
/** Runs an ant command, sending output to the given out and error streams */
|
||||
static Object runAntCommand(Project project, Closure command, PrintStream outputStream, PrintStream errorStream) {
|
||||
DefaultLogger listener = new DefaultLogger(
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.gradle.test
|
||||
|
||||
import org.apache.tools.ant.taskdefs.condition.Os
|
||||
import org.elasticsearch.gradle.VersionProperties
|
||||
import org.gradle.api.InvalidUserDataException
|
||||
import org.gradle.api.Project
|
||||
|
@ -45,6 +46,12 @@ class NodeInfo {
|
|||
/** elasticsearch home dir */
|
||||
File homeDir
|
||||
|
||||
/** config directory */
|
||||
File confDir
|
||||
|
||||
/** THE config file */
|
||||
File configFile
|
||||
|
||||
/** working directory for the node process */
|
||||
File cwd
|
||||
|
||||
|
@ -63,8 +70,14 @@ class NodeInfo {
|
|||
/** arguments to start the node with */
|
||||
List<String> args
|
||||
|
||||
/** Executable to run the bin/elasticsearch with, either cmd or sh */
|
||||
String executable
|
||||
|
||||
/** Path to the elasticsearch start script */
|
||||
String esScript
|
||||
File esScript
|
||||
|
||||
/** script to run when running in the background */
|
||||
File wrapperScript
|
||||
|
||||
/** buffer for ant output when starting this node */
|
||||
ByteArrayOutputStream buffer = new ByteArrayOutputStream()
|
||||
|
@ -77,34 +90,75 @@ class NodeInfo {
|
|||
baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
|
||||
pidFile = new File(baseDir, 'es.pid')
|
||||
homeDir = homeDir(baseDir, config.distribution)
|
||||
confDir = confDir(baseDir, config.distribution)
|
||||
configFile = new File(confDir, 'elasticsearch.yml')
|
||||
cwd = new File(baseDir, "cwd")
|
||||
failedMarker = new File(cwd, 'run.failed')
|
||||
startLog = new File(cwd, 'run.log')
|
||||
pluginsTmpDir = new File(baseDir, "plugins tmp")
|
||||
|
||||
args = []
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
executable = 'cmd'
|
||||
args.add('/C')
|
||||
args.add('"') // quote the entire command
|
||||
wrapperScript = new File(cwd, "run.bat")
|
||||
esScript = new File(homeDir, 'bin/elasticsearch.bat')
|
||||
} else {
|
||||
executable = 'sh'
|
||||
wrapperScript = new File(cwd, "run")
|
||||
esScript = new File(homeDir, 'bin/elasticsearch')
|
||||
}
|
||||
if (config.daemonize) {
|
||||
args.add("${wrapperScript}")
|
||||
} else {
|
||||
args.add("${esScript}")
|
||||
}
|
||||
|
||||
env = [
|
||||
'JAVA_HOME' : project.javaHome,
|
||||
'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
|
||||
]
|
||||
args = config.systemProperties.collect { key, value -> "-D${key}=${value}" }
|
||||
args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
|
||||
for (Map.Entry<String, String> property : System.properties.entrySet()) {
|
||||
if (property.getKey().startsWith('es.')) {
|
||||
args.add("-D${property.getKey()}=${property.getValue()}")
|
||||
}
|
||||
}
|
||||
// running with cmd on windows will look for this with the .bat extension
|
||||
esScript = new File(homeDir, 'bin/elasticsearch').toString()
|
||||
args.add("-Des.path.conf=${confDir}")
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
args.add('"') // end the entire command, quoted
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns debug string for the command that started this node. */
|
||||
String getCommandString() {
|
||||
String esCommandString = "Elasticsearch node ${nodeNum} command: ${esScript} "
|
||||
esCommandString += args.join(' ')
|
||||
esCommandString += '\nenvironment:'
|
||||
env.each { k, v -> esCommandString += "\n ${k}: ${v}" }
|
||||
String esCommandString = "\nNode ${nodeNum} configuration:\n"
|
||||
esCommandString += "|-----------------------------------------\n"
|
||||
esCommandString += "| cwd: ${cwd}\n"
|
||||
esCommandString += "| command: ${executable} ${args.join(' ')}\n"
|
||||
esCommandString += '| environment:\n'
|
||||
env.each { k, v -> esCommandString += "| ${k}: ${v}\n" }
|
||||
if (config.daemonize) {
|
||||
esCommandString += "|\n| [${wrapperScript.name}]\n"
|
||||
wrapperScript.eachLine('UTF-8', { line -> esCommandString += " ${line}\n"})
|
||||
}
|
||||
esCommandString += '|\n| [elasticsearch.yml]\n'
|
||||
configFile.eachLine('UTF-8', { line -> esCommandString += "| ${line}\n" })
|
||||
esCommandString += "|-----------------------------------------"
|
||||
return esCommandString
|
||||
}
|
||||
|
||||
void writeWrapperScript() {
|
||||
String argsPasser = '"$@"'
|
||||
String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
argsPasser = '%*'
|
||||
exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
|
||||
}
|
||||
wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
|
||||
}
|
||||
|
||||
/** Returns the http port for this node */
int httpPort() {
return config.baseHttpPort + nodeNum

@ -119,13 +173,32 @@ class NodeInfo {
static File homeDir(File baseDir, String distro) {
String path
switch (distro) {
case 'integ-test-zip':
case 'zip':
case 'tar':
path = "elasticsearch-${VersionProperties.elasticsearch}"
break;
break
case 'rpm':
case 'deb':
path = "${distro}-extracted/usr/share/elasticsearch"
break
default:
throw new InvalidUserDataException("Unknown distribution: ${distro}")
}
return new File(baseDir, path)
}

static File confDir(File baseDir, String distro) {
switch (distro) {
case 'integ-test-zip':
case 'zip':
case 'tar':
return new File(homeDir(baseDir, distro), 'config')
case 'rpm':
case 'deb':
return new File(baseDir, "${distro}-extracted/etc/elasticsearch")
default:
throw new InvalidUserDataException("Unknown distribution: ${distro}")
}
}
}
@ -31,52 +31,38 @@ import org.gradle.util.ConfigureUtil
|
|||
* Runs integration tests, but first starts an ES cluster,
|
||||
* and passes the ES cluster info as parameters to the tests.
|
||||
*/
|
||||
class RestIntegTestTask extends RandomizedTestingTask {
|
||||
public class RestIntegTestTask extends RandomizedTestingTask {
|
||||
|
||||
ClusterConfiguration clusterConfig = new ClusterConfiguration()
|
||||
|
||||
/** Flag indicating whether the rest tests in the rest spec should be run. */
|
||||
@Input
|
||||
boolean includePackaged = false
|
||||
|
||||
static RestIntegTestTask configure(Project project) {
|
||||
Map integTestOptions = [
|
||||
name: 'integTest',
|
||||
type: RestIntegTestTask,
|
||||
dependsOn: 'testClasses',
|
||||
group: JavaBasePlugin.VERIFICATION_GROUP,
|
||||
description: 'Runs rest tests against an elasticsearch cluster.'
|
||||
]
|
||||
RestIntegTestTask integTest = project.tasks.create(integTestOptions)
|
||||
integTest.configure(BuildPlugin.commonTestConfig(project))
|
||||
integTest.configure {
|
||||
include '**/*IT.class'
|
||||
systemProperty 'tests.rest.load_packaged', 'false'
|
||||
}
|
||||
RandomizedTestingTask test = project.tasks.findByName('test')
|
||||
if (test != null) {
|
||||
integTest.classpath = test.classpath
|
||||
integTest.testClassesDir = test.testClassesDir
|
||||
integTest.mustRunAfter(test)
|
||||
}
|
||||
project.check.dependsOn(integTest)
|
||||
public RestIntegTestTask() {
|
||||
description = 'Runs rest tests against an elasticsearch cluster.'
|
||||
group = JavaBasePlugin.VERIFICATION_GROUP
|
||||
dependsOn(project.testClasses)
|
||||
classpath = project.sourceSets.test.runtimeClasspath
|
||||
testClassesDir = project.sourceSets.test.output.classesDir
|
||||
|
||||
// start with the common test configuration
|
||||
configure(BuildPlugin.commonTestConfig(project))
|
||||
// override/add more for rest tests
|
||||
parallelism = '1'
|
||||
include('**/*IT.class')
|
||||
systemProperty('tests.rest.load_packaged', 'false')
|
||||
|
||||
// copy the rest spec/tests into the test resources
|
||||
RestSpecHack.configureDependencies(project)
|
||||
project.afterEvaluate {
|
||||
integTest.dependsOn(RestSpecHack.configureTask(project, integTest.includePackaged))
|
||||
dependsOn(RestSpecHack.configureTask(project, includePackaged))
|
||||
systemProperty('tests.cluster', "localhost:${clusterConfig.baseTransportPort}")
|
||||
}
|
||||
return integTest
|
||||
}
|
||||
|
||||
RestIntegTestTask() {
|
||||
project.afterEvaluate {
|
||||
Task test = project.tasks.findByName('test')
|
||||
if (test != null) {
|
||||
mustRunAfter(test)
|
||||
}
|
||||
// this must run after all projects have been configured, so we know any project
|
||||
// references can be accessed as a fully configured
|
||||
project.gradle.projectsEvaluated {
|
||||
ClusterFormationTasks.setup(project, this, clusterConfig)
|
||||
configure {
|
||||
parallelism '1'
|
||||
systemProperty 'tests.cluster', "localhost:${clusterConfig.baseTransportPort}"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -89,11 +75,11 @@ class RestIntegTestTask extends RandomizedTestingTask {
|
|||
}
|
||||
|
||||
@Input
|
||||
void cluster(Closure closure) {
|
||||
public void cluster(Closure closure) {
|
||||
ConfigureUtil.configure(closure, clusterConfig)
|
||||
}
|
||||
|
||||
ClusterConfiguration getCluster() {
|
||||
public ClusterConfiguration getCluster() {
|
||||
return clusterConfig
|
||||
}
|
||||
}
|
||||
|
|
|
@ -28,12 +28,12 @@ import org.gradle.api.tasks.Copy
* currently must be available on the local filesystem. This class encapsulates
* setting up tasks to copy the rest spec api to test resources.
*/
class RestSpecHack {
public class RestSpecHack {
/**
* Sets dependencies needed to copy the rest spec.
* @param project The project to add rest spec dependency to
*/
static void configureDependencies(Project project) {
public static void configureDependencies(Project project) {
project.configurations {
restSpec
}

@ -48,7 +48,7 @@ class RestSpecHack {
* @param project The project to add the copy task to
* @param includePackagedTests true if the packaged tests should be copied, false otherwise
*/
static Task configureTask(Project project, boolean includePackagedTests) {
public static Task configureTask(Project project, boolean includePackagedTests) {
Map copyRestSpecProps = [
name : 'copyRestSpec',
type : Copy,

@ -65,7 +65,6 @@ class RestSpecHack {
project.idea {
module {
if (scopes.TEST != null) {
// TODO: need to add the TEST scope somehow for rest test plugin...
scopes.TEST.plus.add(project.configurations.restSpec)
}
}
@ -18,22 +18,19 @@
*/
package org.elasticsearch.gradle.test

import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.gradle.api.Plugin
import org.gradle.api.Project

/** Configures the build to have a rest integration test. */
class RestTestPlugin implements Plugin<Project> {
/** A plugin to add rest integration tests. Used for qa projects. */
public class RestTestPlugin implements Plugin<Project> {

@Override
void apply(Project project) {
public void apply(Project project) {
project.pluginManager.apply(StandaloneTestBasePlugin)

RandomizedTestingTask integTest = RestIntegTestTask.configure(project)
RestSpecHack.configureDependencies(project)
integTest.configure {
classpath = project.sourceSets.test.runtimeClasspath
testClassesDir project.sourceSets.test.output.classesDir
}
RestIntegTestTask integTest = project.tasks.create('integTest', RestIntegTestTask.class)
integTest.cluster.distribution = 'zip' // rest tests should run with the real zip
integTest.mustRunAfter(project.precommit)
project.check.dependsOn(integTest)
}
}
@ -2,13 +2,17 @@ package org.elasticsearch.gradle.test

import org.gradle.api.DefaultTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option
import org.gradle.util.ConfigureUtil

class RunTask extends DefaultTask {
public class RunTask extends DefaultTask {

ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false)

RunTask() {
public RunTask() {
description = "Runs elasticsearch with '${project.path}'"
group = 'Verification'
project.afterEvaluate {
ClusterFormationTasks.setup(project, this, clusterConfig)
}

@ -22,11 +26,10 @@ class RunTask {
clusterConfig.debug = enabled;
}

static void configure(Project project) {
RunTask task = project.tasks.create(
name: 'run',
type: RunTask,
description: "Runs elasticsearch with '${project.path}'",
group: 'Verification')
/** Configure the cluster that will be run. */
@Override
public Task configure(Closure closure) {
ConfigureUtil.configure(closure, clusterConfig)
return this
}
}
@ -27,35 +27,27 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.plugins.ide.eclipse.model.EclipseClasspath

/** Configures the build to have a rest integration test. */
class StandaloneTestBasePlugin implements Plugin<Project> {
public class StandaloneTestBasePlugin implements Plugin<Project> {

@Override
void apply(Project project) {
public void apply(Project project) {
project.pluginManager.apply(JavaBasePlugin)
project.pluginManager.apply(RandomizedTestingPlugin)

BuildPlugin.globalBuildInfo(project)
BuildPlugin.configureRepositories(project)

// remove some unnecessary tasks for a qa test
project.tasks.removeAll { it.name in ['assemble', 'buildDependents'] }

// only setup tests to build
project.sourceSets {
test
}
project.dependencies {
testCompile "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}"
}
project.sourceSets.create('test')
project.dependencies.add('testCompile', "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}")

project.eclipse {
classpath {
sourceSets = [project.sourceSets.test]
plusConfigurations = [project.configurations.testRuntime]
}
}
PrecommitTasks.configure(project)
project.eclipse.classpath.sourceSets = [project.sourceSets.test]
project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]

PrecommitTasks.create(project, false)
project.check.dependsOn(project.precommit)
}
}
@ -25,11 +25,11 @@ import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin

/** Configures the build to have only unit tests. */
class StandaloneTestPlugin implements Plugin<Project> {
/** A plugin to add tests only. Used for QA tests that run arbitrary unit tests. */
public class StandaloneTestPlugin implements Plugin<Project> {

@Override
void apply(Project project) {
public void apply(Project project) {
project.pluginManager.apply(StandaloneTestBasePlugin)

Map testOptions = [

@ -41,10 +41,9 @@ class StandaloneTestPlugin implements Plugin<Project> {
]
RandomizedTestingTask test = project.tasks.create(testOptions)
test.configure(BuildPlugin.commonTestConfig(project))
test.configure {
classpath = project.sourceSets.test.runtimeClasspath
testClassesDir project.sourceSets.test.output.classesDir
}
test.classpath = project.sourceSets.test.runtimeClasspath
test.testClassesDir project.sourceSets.test.output.classesDir
test.mustRunAfter(project.precommit)
project.check.dependsOn(test)
}
}
@ -112,3 +112,7 @@ java.lang.System#setProperty(java.lang.String,java.lang.String)
|
|||
java.lang.System#clearProperty(java.lang.String)
|
||||
java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a read-only view
|
||||
|
||||
@defaultMessage Avoid unchecked warnings by using Collections#empty(List|Map|Set) methods
|
||||
java.util.Collections#EMPTY_LIST
|
||||
java.util.Collections#EMPTY_MAP
|
||||
java.util.Collections#EMPTY_SET
|
||||
|
|
|
@ -90,3 +90,12 @@ org.elasticsearch.common.io.PathUtils#get(java.net.URI)
|
|||
|
||||
@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
|
||||
org.apache.lucene.search.Query#setBoost(float)
|
||||
|
||||
@defaultMessage Constructing a DateTime without a time zone is dangerous
|
||||
org.joda.time.DateTime#<init>()
|
||||
org.joda.time.DateTime#<init>(long)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int, int)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
|
||||
org.joda.time.DateTime#now()
|
||||
org.joda.time.DateTimeZone#getDefault()
|
||||
|
|
|
@ -1,66 +0,0 @@
|
|||
# Licensed to Elasticsearch under one or more contributor
|
||||
# license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright
|
||||
# ownership. Elasticsearch licenses this file to you under
|
||||
# the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on
|
||||
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
# either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
@defaultMessage unsafe encoders/decoders have problems in the lzf compress library. Use variants of encode/decode functions which take Encoder/Decoder.
|
||||
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int)
|
||||
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int)
|
||||
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createEncoder(int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.impl.UnsafeChunkEncoders#createNonAllocatingEncoder(int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.impl.UnsafeChunkDecoder#<init>()
|
||||
com.ning.compress.lzf.parallel.CompressTask
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance()
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int)
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int)
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalInstance(int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.util.ChunkEncoderFactory#optimalNonAllocatingInstance(int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.util.ChunkDecoderFactory#optimalInstance()
|
||||
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.File)
|
||||
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.io.FileDescriptor)
|
||||
com.ning.compress.lzf.util.LZFFileInputStream#<init>(java.lang.String)
|
||||
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File)
|
||||
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.File, boolean)
|
||||
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.io.FileDescriptor)
|
||||
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String)
|
||||
com.ning.compress.lzf.util.LZFFileOutputStream#<init>(java.lang.String, boolean)
|
||||
com.ning.compress.lzf.LZFEncoder#encode(byte[])
|
||||
com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int)
|
||||
com.ning.compress.lzf.LZFEncoder#encode(byte[], int, int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int)
|
||||
com.ning.compress.lzf.LZFEncoder#appendEncoded(byte[], int, int, byte[], int, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.LZFCompressingInputStream#<init>(java.io.InputStream)
|
||||
com.ning.compress.lzf.LZFDecoder#fastDecoder()
|
||||
com.ning.compress.lzf.LZFDecoder#decode(byte[])
|
||||
com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int)
|
||||
com.ning.compress.lzf.LZFDecoder#decode(byte[], byte[])
|
||||
com.ning.compress.lzf.LZFDecoder#decode(byte[], int, int, byte[])
|
||||
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream)
|
||||
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, boolean)
|
||||
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.LZFInputStream#<init>(java.io.InputStream, com.ning.compress.BufferRecycler, boolean)
|
||||
com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream)
|
||||
com.ning.compress.lzf.LZFOutputStream#<init>(java.io.OutputStream, com.ning.compress.BufferRecycler)
|
||||
com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler)
|
||||
com.ning.compress.lzf.LZFUncompressor#<init>(com.ning.compress.DataHandler, com.ning.compress.BufferRecycler)
|
||||
|
||||
@defaultMessage Constructing a DateTime without a time zone is dangerous
|
||||
org.joda.time.DateTime#<init>()
|
||||
org.joda.time.DateTime#<init>(long)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int, int)
|
||||
org.joda.time.DateTime#<init>(int, int, int, int, int, int, int)
|
||||
org.joda.time.DateTime#now()
|
||||
org.joda.time.DateTimeZone#getDefault()
|
|
@ -62,12 +62,9 @@ dependencies {
|
|||
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}"
|
||||
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
|
||||
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
|
||||
compile "org.yaml:snakeyaml:1.15" // used by jackson yaml
|
||||
|
||||
// network stack
|
||||
compile 'io.netty:netty:3.10.5.Final'
|
||||
// compression of transport protocol
|
||||
compile 'com.ning:compress-lzf:1.0.2'
|
||||
// percentiles aggregation
|
||||
compile 'com.tdunning:t-digest:3.0'
|
||||
// percentile ranks aggregation
|
||||
|
@ -117,6 +114,9 @@ forbiddenPatterns {
|
|||
exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
|
||||
}
|
||||
|
||||
// dependency license are currently checked in distribution
|
||||
dependencyLicenses.enabled = false
|
||||
|
||||
if (isEclipse == false || project.path == ":core-tests") {
|
||||
task integTest(type: RandomizedTestingTask,
|
||||
group: JavaBasePlugin.VERIFICATION_GROUP,
|
||||
|
@ -129,8 +129,4 @@ if (isEclipse == false || project.path == ":core-tests") {
|
|||
}
|
||||
check.dependsOn integTest
|
||||
integTest.mustRunAfter test
|
||||
|
||||
RestSpecHack.configureDependencies(project)
|
||||
Task copyRestSpec = RestSpecHack.configureTask(project, true)
|
||||
integTest.dependsOn copyRestSpec
|
||||
}
|
||||
|
|
|
@ -51,9 +51,13 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||
private static final Map<Class<? extends ElasticsearchException>, ElasticsearchExceptionHandle> CLASS_TO_ELASTICSEARCH_EXCEPTION_HANDLE;
|
||||
private final Map<String, List<String>> headers = new HashMap<>();
|
||||
|
||||
/**
|
||||
* Construct a <code>ElasticsearchException</code> with the specified cause exception.
|
||||
*/
|
||||
public ElasticsearchException(Throwable cause) {
|
||||
super(cause);
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct a <code>ElasticsearchException</code> with the specified detail message.
|
||||
*
|
||||
|
@ -550,7 +554,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||
NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class, org.elasticsearch.transport.NodeDisconnectedException::new, 84),
|
||||
ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class, org.elasticsearch.index.AlreadyExpiredException::new, 85),
|
||||
AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class, org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86),
|
||||
MERGE_MAPPING_EXCEPTION(org.elasticsearch.index.mapper.MergeMappingException.class, org.elasticsearch.index.mapper.MergeMappingException::new, 87),
|
||||
// 87 used to be for MergeMappingException
|
||||
INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class, org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
|
||||
PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class, org.elasticsearch.percolator.PercolateException::new, 89),
|
||||
REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class, org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
|
||||
|
|
|
@ -270,11 +270,11 @@ public class Version {
|
|||
public static final int V_2_0_2_ID = 2000299;
|
||||
public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_1_0_ID = 2010099;
|
||||
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_0);
|
||||
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final int V_2_1_1_ID = 2010199;
|
||||
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_0);
|
||||
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final int V_2_2_0_ID = 2020099;
|
||||
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_0);
|
||||
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
|
||||
public static final int V_3_0_0_ID = 3000099;
|
||||
public static final Version V_3_0_0 = new Version(V_3_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
|
||||
public static final Version CURRENT = V_3_0_0;
|
||||
|
|
|
@ -74,7 +74,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
|
|||
protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
|
||||
if (request.waitForEvents() != null) {
|
||||
final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis();
|
||||
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
return currentState;
|
||||
|
|
|
@ -72,14 +72,14 @@ public class NodeInfo extends BaseNodeResponse {
|
|||
private HttpInfo http;
|
||||
|
||||
@Nullable
|
||||
private PluginsInfo plugins;
|
||||
private PluginsAndModules plugins;
|
||||
|
||||
NodeInfo() {
|
||||
}
|
||||
|
||||
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
|
||||
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
|
||||
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsInfo plugins) {
|
||||
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins) {
|
||||
super(node);
|
||||
this.version = version;
|
||||
this.build = build;
|
||||
|
@ -172,7 +172,7 @@ public class NodeInfo extends BaseNodeResponse {
|
|||
}
|
||||
|
||||
@Nullable
|
||||
public PluginsInfo getPlugins() {
|
||||
public PluginsAndModules getPlugins() {
|
||||
return this.plugins;
|
||||
}
|
||||
|
||||
|
@ -217,7 +217,8 @@ public class NodeInfo extends BaseNodeResponse {
|
|||
http = HttpInfo.readHttpInfo(in);
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
plugins = PluginsInfo.readPluginsInfo(in);
|
||||
plugins = new PluginsAndModules();
|
||||
plugins.readFrom(in);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,115 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.node.info;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.plugins.PluginInfo;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Information about plugins and modules
|
||||
*/
|
||||
public class PluginsAndModules implements Streamable, ToXContent {
|
||||
private List<PluginInfo> plugins;
|
||||
private List<PluginInfo> modules;
|
||||
|
||||
public PluginsAndModules() {
|
||||
plugins = new ArrayList<>();
|
||||
modules = new ArrayList<>();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the list of plugins, ordered by plugin name
|
||||
*/
|
||||
public List<PluginInfo> getPluginInfos() {
|
||||
List<PluginInfo> plugins = new ArrayList<>(this.plugins);
|
||||
Collections.sort(plugins, (p1, p2) -> p1.getName().compareTo(p2.getName()));
|
||||
return plugins;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the list of modules, ordered by module name
|
||||
*/
|
||||
public List<PluginInfo> getModuleInfos() {
|
||||
List<PluginInfo> modules = new ArrayList<>(this.modules);
|
||||
Collections.sort(modules, (p1, p2) -> p1.getName().compareTo(p2.getName()));
|
||||
return modules;
|
||||
}
|
||||
|
||||
public void addPlugin(PluginInfo info) {
|
||||
plugins.add(info);
|
||||
}
|
||||
|
||||
public void addModule(PluginInfo info) {
|
||||
modules.add(info);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
if (plugins.isEmpty() == false || modules.isEmpty() == false) {
|
||||
throw new IllegalStateException("instance is already populated");
|
||||
}
|
||||
int plugins_size = in.readInt();
|
||||
for (int i = 0; i < plugins_size; i++) {
|
||||
plugins.add(PluginInfo.readFromStream(in));
|
||||
}
|
||||
int modules_size = in.readInt();
|
||||
for (int i = 0; i < modules_size; i++) {
|
||||
modules.add(PluginInfo.readFromStream(in));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeInt(plugins.size());
|
||||
for (PluginInfo plugin : getPluginInfos()) {
|
||||
plugin.writeTo(out);
|
||||
}
|
||||
out.writeInt(modules.size());
|
||||
for (PluginInfo module : getModuleInfos()) {
|
||||
module.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray("plugins");
|
||||
for (PluginInfo pluginInfo : getPluginInfos()) {
|
||||
pluginInfo.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
// TODO: not ideal, make a better api for this (e.g. with jar metadata, and so on)
|
||||
builder.startArray("modules");
|
||||
for (PluginInfo moduleInfo : getModuleInfos()) {
|
||||
moduleInfo.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
|
||||
return builder;
|
||||
}
|
||||
}
|
|
@ -1,101 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.node.info;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilderString;
|
||||
import org.elasticsearch.plugins.PluginInfo;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
|
||||
public class PluginsInfo implements Streamable, ToXContent {
|
||||
static final class Fields {
|
||||
static final XContentBuilderString PLUGINS = new XContentBuilderString("plugins");
|
||||
}
|
||||
|
||||
private List<PluginInfo> infos;
|
||||
|
||||
public PluginsInfo() {
|
||||
infos = new ArrayList<>();
|
||||
}
|
||||
|
||||
public PluginsInfo(int size) {
|
||||
infos = new ArrayList<>(size);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return an ordered list based on plugins name
|
||||
*/
|
||||
public List<PluginInfo> getInfos() {
|
||||
Collections.sort(infos, new Comparator<PluginInfo>() {
|
||||
@Override
|
||||
public int compare(final PluginInfo o1, final PluginInfo o2) {
|
||||
return o1.getName().compareTo(o2.getName());
|
||||
}
|
||||
});
|
||||
|
||||
return infos;
|
||||
}
|
||||
|
||||
public void add(PluginInfo info) {
|
||||
infos.add(info);
|
||||
}
|
||||
|
||||
public static PluginsInfo readPluginsInfo(StreamInput in) throws IOException {
|
||||
PluginsInfo infos = new PluginsInfo();
|
||||
infos.readFrom(in);
|
||||
return infos;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
int plugins_size = in.readInt();
|
||||
for (int i = 0; i < plugins_size; i++) {
|
||||
infos.add(PluginInfo.readFromStream(in));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeInt(infos.size());
|
||||
for (PluginInfo plugin : getInfos()) {
|
||||
plugin.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startArray(Fields.PLUGINS);
|
||||
for (PluginInfo pluginInfo : getInfos()) {
|
||||
pluginInfo.toXContent(builder, params);
|
||||
}
|
||||
builder.endArray();
|
||||
|
||||
return builder;
|
||||
}
|
||||
}
|
|
@ -68,7 +68,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
|
|||
|
||||
@Override
|
||||
protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) {
|
||||
clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterRerouteResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("cluster_reroute (api)", new AckedClusterStateUpdateTask<ClusterRerouteResponse>(Priority.IMMEDIATE, request, listener) {
|
||||
|
||||
private volatile ClusterState clusterStateToSend;
|
||||
private volatile RoutingExplanations explanations;
|
||||
|
|
|
@ -91,7 +91,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
|
|||
final Settings.Builder transientUpdates = Settings.settingsBuilder();
|
||||
final Settings.Builder persistentUpdates = Settings.settingsBuilder();
|
||||
|
||||
clusterService.submitStateUpdateTask("cluster_update_settings", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("cluster_update_settings",
|
||||
new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.IMMEDIATE, request, listener) {
|
||||
|
||||
private volatile boolean changed = false;
|
||||
|
||||
|
@ -132,7 +133,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
|
|||
// in the components (e.g. FilterAllocationDecider), so the changes made by the first call aren't visible
|
||||
// to the components until the ClusterStateListener instances have been invoked, but are visible after
|
||||
// the first update task has been completed.
|
||||
clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings",
|
||||
new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.URGENT, request, listener) {
|
||||
|
||||
@Override
|
||||
public boolean mustAck(DiscoveryNode discoveryNode) {
|
||||
|
|
|
@ -74,7 +74,7 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
|
|||
versions.add(nodeResponse.nodeInfo().getVersion());
|
||||
process.addNodeStats(nodeResponse.nodeStats());
|
||||
jvm.addNodeInfoStats(nodeResponse.nodeInfo(), nodeResponse.nodeStats());
|
||||
plugins.addAll(nodeResponse.nodeInfo().getPlugins().getInfos());
|
||||
plugins.addAll(nodeResponse.nodeInfo().getPlugins().getPluginInfos());
|
||||
|
||||
// now do the stats that should be deduped by hardware (implemented by ip deduping)
|
||||
TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress();
|
||||
|
|
|
@ -46,9 +46,10 @@ import java.util.List;
|
|||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
/**
|
||||
* A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes
|
||||
* it in a single batch.
|
||||
* A bulk request holds an ordered list of {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s
|
||||
* and allows executing them in a single batch.
|
||||
*
|
||||
* Note that we only support refresh on the bulk request not per item.
|
||||
* @see org.elasticsearch.client.Client#bulk(BulkRequest)
|
||||
*/
|
||||
public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest {
|
||||
|
@ -89,6 +90,12 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
|||
return add(request, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a request to the current BulkRequest.
|
||||
* @param request Request to add
|
||||
* @param payload Optional payload
|
||||
* @return the current bulk request
|
||||
*/
|
||||
public BulkRequest add(ActionRequest request, @Nullable Object payload) {
|
||||
if (request instanceof IndexRequest) {
|
||||
add((IndexRequest) request, payload);
|
||||
|
@ -127,7 +134,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
|||
BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) {
|
||||
requests.add(request);
|
||||
addPayload(payload);
|
||||
sizeInBytes += request.source().length() + REQUEST_OVERHEAD;
|
||||
// lack of source is validated in validate() method
|
||||
sizeInBytes += (request.source() != null ? request.source().length() : 0) + REQUEST_OVERHEAD;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -292,7 +300,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
|||
String parent = null;
|
||||
String[] fields = defaultFields;
|
||||
String timestamp = null;
|
||||
Long ttl = null;
|
||||
TimeValue ttl = null;
|
||||
String opType = null;
|
||||
long version = Versions.MATCH_ANY;
|
||||
VersionType versionType = VersionType.INTERNAL;
|
||||
|
@ -325,9 +333,9 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
|||
timestamp = parser.text();
|
||||
} else if ("_ttl".equals(currentFieldName) || "ttl".equals(currentFieldName)) {
|
||||
if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
|
||||
ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName).millis();
|
||||
ttl = TimeValue.parseTimeValue(parser.text(), null, currentFieldName);
|
||||
} else {
|
||||
ttl = parser.longValue();
|
||||
ttl = new TimeValue(parser.longValue());
|
||||
}
|
||||
} else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
|
||||
opType = parser.text();
|
||||
|
@ -478,8 +486,14 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
|||
if (requests.isEmpty()) {
|
||||
validationException = addValidationError("no requests added", validationException);
|
||||
}
|
||||
for (int i = 0; i < requests.size(); i++) {
|
||||
ActionRequestValidationException ex = requests.get(i).validate();
|
||||
for (ActionRequest request : requests) {
|
||||
// We first check if refresh has been set
|
||||
if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) ||
|
||||
(request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) ||
|
||||
(request instanceof IndexRequest && ((IndexRequest)request).refresh())) {
|
||||
validationException = addValidationError("Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", validationException);
|
||||
}
|
||||
ActionRequestValidationException ex = request.validate();
|
||||
if (ex != null) {
|
||||
if (validationException == null) {
|
||||
validationException = new ActionRequestValidationException();
|
||||
|
|
|
@ -35,6 +35,7 @@ import org.elasticsearch.common.bytes.BytesReference;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.*;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
|
@ -136,7 +137,8 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
private String parent;
|
||||
@Nullable
|
||||
private String timestamp;
|
||||
private long ttl = -1;
|
||||
@Nullable
|
||||
private TimeValue ttl;
|
||||
|
||||
private BytesReference source;
|
||||
|
||||
|
@ -229,6 +231,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
if (!versionType.validateVersionForWrites(version)) {
|
||||
validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException);
|
||||
}
|
||||
|
||||
if (ttl != null) {
|
||||
if (ttl.millis() < 0) {
|
||||
validationException = addValidationError("ttl must not be negative", validationException);
|
||||
}
|
||||
}
|
||||
return validationException;
|
||||
}
|
||||
|
||||
|
@ -324,22 +332,33 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
}
|
||||
|
||||
/**
|
||||
* Sets the relative ttl value. It musts be > 0 as it makes little sense otherwise. Setting it
|
||||
* to <tt>null</tt> will reset to have no ttl.
|
||||
* Sets the ttl value as a time value expression.
|
||||
*/
|
||||
public IndexRequest ttl(Long ttl) throws ElasticsearchGenerationException {
|
||||
if (ttl == null) {
|
||||
this.ttl = -1;
|
||||
return this;
|
||||
}
|
||||
if (ttl <= 0) {
|
||||
throw new IllegalArgumentException("TTL value must be > 0. Illegal value provided [" + ttl + "]");
|
||||
}
|
||||
public IndexRequest ttl(String ttl) {
|
||||
this.ttl = TimeValue.parseTimeValue(ttl, null, "ttl");
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the ttl as a {@link TimeValue} instance.
|
||||
*/
|
||||
public IndexRequest ttl(TimeValue ttl) {
|
||||
this.ttl = ttl;
|
||||
return this;
|
||||
}
|
||||
|
||||
public long ttl() {
|
||||
/**
|
||||
* Sets the relative ttl value in milliseconds. It must be greater than 0 as it makes little sense otherwise.
|
||||
*/
|
||||
public IndexRequest ttl(long ttl) {
|
||||
this.ttl = new TimeValue(ttl);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the ttl as a {@link TimeValue}
|
||||
*/
|
||||
public TimeValue ttl() {
|
||||
return this.ttl;
|
||||
}
|
||||
|
||||
|
@ -665,7 +684,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
routing = in.readOptionalString();
|
||||
parent = in.readOptionalString();
|
||||
timestamp = in.readOptionalString();
|
||||
ttl = in.readLong();
|
||||
ttl = in.readBoolean() ? TimeValue.readTimeValue(in) : null;
|
||||
source = in.readBytesReference();
|
||||
|
||||
opType = OpType.fromId(in.readByte());
|
||||
|
@ -682,7 +701,12 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
out.writeOptionalString(routing);
|
||||
out.writeOptionalString(parent);
|
||||
out.writeOptionalString(timestamp);
|
||||
out.writeLong(ttl);
|
||||
if (ttl == null) {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
ttl.writeTo(out);
|
||||
}
|
||||
out.writeBytesReference(source);
|
||||
out.writeByte(opType.id());
|
||||
out.writeBoolean(refresh);
|
||||
|
|
|
@ -23,6 +23,7 @@ import org.elasticsearch.action.support.replication.ReplicationRequestBuilder;
|
|||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
|
@ -254,9 +255,27 @@ public class IndexRequestBuilder extends ReplicationRequestBuilder<IndexRequest,
|
|||
return this;
|
||||
}
|
||||
|
||||
// Sets the relative ttl value. It musts be > 0 as it makes little sense otherwise.
|
||||
/**
|
||||
* Sets the ttl value as a time value expression.
|
||||
*/
|
||||
public IndexRequestBuilder setTTL(String ttl) {
|
||||
request.ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the relative ttl value in milliseconds. It must be greater than 0 as it makes little sense otherwise.
|
||||
*/
|
||||
public IndexRequestBuilder setTTL(long ttl) {
|
||||
request.ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the ttl as a {@link TimeValue} instance.
|
||||
*/
|
||||
public IndexRequestBuilder setTTL(TimeValue ttl) {
|
||||
request.ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -223,7 +223,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
|
|||
throw requestBlockException;
|
||||
}
|
||||
|
||||
logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version());
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version());
|
||||
}
|
||||
ShardsIterator shardIt = shards(clusterState, request, concreteIndices);
|
||||
nodeIds = new HashMap<>();
|
||||
|
||||
|
@ -300,7 +302,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
|
|||
}
|
||||
|
||||
protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) {
|
||||
logger.trace("received response for [{}] from node [{}]", actionName, node.id());
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("received response for [{}] from node [{}]", actionName, node.id());
|
||||
}
|
||||
|
||||
// this is defensive to protect against the possibility of double invocation
|
||||
// the current implementation of TransportService#sendRequest guards against this
|
||||
|
@ -351,7 +355,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
|
|||
public void messageReceived(final NodeRequest request, TransportChannel channel) throws Exception {
|
||||
List<ShardRouting> shards = request.getShards();
|
||||
final int totalShards = shards.size();
|
||||
logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards);
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards);
|
||||
}
|
||||
final Object[] shardResultOrExceptions = new Object[totalShards];
|
||||
|
||||
int shardIndex = -1;
|
||||
|
@ -375,10 +381,14 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
|
|||
|
||||
private void onShardOperation(final NodeRequest request, final Object[] shardResults, final int shardIndex, final ShardRouting shardRouting) {
|
||||
try {
|
||||
logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary());
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary());
|
||||
}
|
||||
ShardOperationResult result = shardOperation(request.indicesLevelRequest, shardRouting);
|
||||
shardResults[shardIndex] = result;
|
||||
logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary());
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary());
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
BroadcastShardOperationFailedException e = new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", t);
|
||||
e.setIndex(shardRouting.getIndex());
|
||||
|
|
|
@ -160,7 +160,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
|
||||
protected boolean retryPrimaryException(Throwable e) {
|
||||
return e.getClass() == RetryOnPrimaryException.class
|
||||
|| TransportActions.isShardNotAvailableException(e);
|
||||
|| TransportActions.isShardNotAvailableException(e);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -247,7 +247,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
setShard(shardId);
|
||||
}
|
||||
|
||||
public RetryOnReplicaException(StreamInput in) throws IOException{
|
||||
public RetryOnReplicaException(StreamInput in) throws IOException {
|
||||
super(in);
|
||||
}
|
||||
}
|
||||
|
@ -330,7 +330,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
setShard(shardId);
|
||||
}
|
||||
|
||||
public RetryOnPrimaryException(StreamInput in) throws IOException{
|
||||
public RetryOnPrimaryException(StreamInput in) throws IOException {
|
||||
super(in);
|
||||
}
|
||||
}
|
||||
|
@ -476,7 +476,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
try {
|
||||
// if we got disconnected from the node, or the node / shard is not in the right state (being closed)
|
||||
if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException ||
|
||||
retryPrimaryException(exp)) {
|
||||
retryPrimaryException(exp)) {
|
||||
// we already marked it as started when we executed it (removed the listener) so pass false
|
||||
// to re-add to the cluster listener
|
||||
logger.trace("received an error from node the primary was assigned to ({}), scheduling a retry", exp.getMessage());
|
||||
|
@ -649,7 +649,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
|
||||
if (sizeActive < requiredNumber) {
|
||||
logger.trace("not enough active copies of shard [{}] to meet write consistency of [{}] (have {}, needed {}), scheduling a retry. action [{}], request [{}]",
|
||||
shard.shardId(), consistencyLevel, sizeActive, requiredNumber, actionName, internalRequest.request);
|
||||
shard.shardId(), consistencyLevel, sizeActive, requiredNumber, actionName, internalRequest.request);
|
||||
return "Not enough active copies to meet write consistency of [" + consistencyLevel + "] (have " + sizeActive + ", needed " + requiredNumber + ").";
|
||||
} else {
|
||||
return null;
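The block above rejects the write (and later schedules a retry) when fewer shard copies are active than the configured write consistency requires. The check in isolation, paraphrased from the hunk; consistencyLevel is shown as a plain String here rather than the enum used in the class:

----------------------------
// Returns a failure reason when too few copies are active, or null when the write may proceed.
static String checkWriteConsistency(String consistencyLevel, int sizeActive, int requiredNumber) {
    if (sizeActive < requiredNumber) {
        return "Not enough active copies to meet write consistency of [" + consistencyLevel
                + "] (have " + sizeActive + ", needed " + requiredNumber + ").";
    }
    return null;
}
----------------------------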
|
||||
|
@ -833,7 +833,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
protected void doRun() {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("replication phase started. pending [{}], action [{}], request [{}], cluster state version used [{}], primary on start", pending.get(),
|
||||
actionName, replicaRequest, observer.observedState().version(), originalPrimaryShard);
|
||||
actionName, replicaRequest, observer.observedState().version(), originalPrimaryShard);
|
||||
}
|
||||
if (pending.get() == 0) {
|
||||
doFinish();
|
||||
|
@ -883,23 +883,23 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
if (!nodeId.equals(observer.observedState().nodes().localNodeId())) {
|
||||
final DiscoveryNode node = observer.observedState().nodes().get(nodeId);
|
||||
transportService.sendRequest(node, transportReplicaAction, replicaRequest,
|
||||
transportOptions, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
|
||||
@Override
|
||||
public void handleResponse(TransportResponse.Empty vResponse) {
|
||||
onReplicaSuccess();
|
||||
}
|
||||
transportOptions, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
|
||||
@Override
|
||||
public void handleResponse(TransportResponse.Empty vResponse) {
|
||||
onReplicaSuccess();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleException(TransportException exp) {
|
||||
logger.trace("[{}] transport failure during replica request [{}], action [{}]", exp, node, replicaRequest, actionName);
|
||||
if (ignoreReplicaException(exp)) {
|
||||
onReplicaFailure(nodeId, exp);
|
||||
} else {
|
||||
logger.warn("{} failed to perform {} on node {}", exp, shardIt.shardId(), actionName, node);
|
||||
shardStateAction.shardFailed(shard, indexMetaData.getIndexUUID(), "failed to perform " + actionName + " on replica on node " + node, exp, shardFailedTimeout, new ReplicationFailedShardStateListener(nodeId, exp));
|
||||
}
|
||||
@Override
|
||||
public void handleException(TransportException exp) {
|
||||
logger.trace("[{}] transport failure during replica request [{}], action [{}]", exp, node, replicaRequest, actionName);
|
||||
if (ignoreReplicaException(exp)) {
|
||||
onReplicaFailure(nodeId, exp);
|
||||
} else {
|
||||
logger.warn("{} failed to perform {} on node {}", exp, shardIt.shardId(), actionName, node);
|
||||
shardStateAction.shardFailed(shard, indexMetaData.getIndexUUID(), "failed to perform " + actionName + " on replica on node " + node, exp, shardFailedTimeout, new ReplicationFailedShardStateListener(nodeId, exp));
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
} else {
|
||||
try {
|
||||
threadPool.executor(executor).execute(new AbstractRunnable() {
|
||||
|
@ -973,18 +973,18 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
for (Map.Entry<String, Throwable> entry : shardReplicaFailures.entrySet()) {
|
||||
RestStatus restStatus = ExceptionsHelper.status(entry.getValue());
|
||||
failuresArray[slot++] = new ReplicationResponse.ShardInfo.Failure(
|
||||
shardId.getIndex(), shardId.getId(), entry.getKey(), entry.getValue(), restStatus, false
|
||||
shardId.getIndex(), shardId.getId(), entry.getKey(), entry.getValue(), restStatus, false
|
||||
);
|
||||
}
|
||||
} else {
|
||||
failuresArray = ReplicationResponse.EMPTY;
|
||||
}
|
||||
finalResponse.setShardInfo(new ReplicationResponse.ShardInfo(
|
||||
totalShards,
|
||||
success.get(),
|
||||
failuresArray
|
||||
totalShards,
|
||||
success.get(),
|
||||
failuresArray
|
||||
|
||||
)
|
||||
)
|
||||
);
|
||||
listener.onResponse(finalResponse);
|
||||
}
|
||||
|
|
|
@ -89,7 +89,7 @@ public class UpdateHelper extends AbstractComponent {
|
|||
throw new DocumentMissingException(shardId, request.type(), request.id());
|
||||
}
|
||||
IndexRequest indexRequest = request.docAsUpsert() ? request.doc() : request.upsertRequest();
|
||||
Long ttl = indexRequest.ttl();
|
||||
TimeValue ttl = indexRequest.ttl();
|
||||
if (request.scriptedUpsert() && request.script() != null) {
|
||||
// Run the script to perform the create logic
|
||||
IndexRequest upsert = request.upsertRequest();
|
||||
|
@ -100,7 +100,7 @@ public class UpdateHelper extends AbstractComponent {
|
|||
ctx.put("_source", upsertDoc);
|
||||
ctx = executeScript(request, ctx);
|
||||
//Allow the script to set TTL using ctx._ttl
|
||||
if (ttl < 0) {
|
||||
if (ttl == null) {
|
||||
ttl = getTTLFromScriptContext(ctx);
|
||||
}
|
||||
|
||||
|
@ -125,7 +125,7 @@ public class UpdateHelper extends AbstractComponent {
|
|||
indexRequest.index(request.index()).type(request.type()).id(request.id())
|
||||
// it has to be a "create!"
|
||||
.create(true)
|
||||
.ttl(ttl == null || ttl < 0 ? null : ttl)
|
||||
.ttl(ttl)
|
||||
.refresh(request.refresh())
|
||||
.routing(request.routing())
|
||||
.parent(request.parent())
|
||||
|
@ -152,7 +152,7 @@ public class UpdateHelper extends AbstractComponent {
|
|||
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(getResult.internalSourceRef(), true);
|
||||
String operation = null;
|
||||
String timestamp = null;
|
||||
Long ttl = null;
|
||||
TimeValue ttl = null;
|
||||
final Map<String, Object> updatedSourceAsMap;
|
||||
final XContentType updateSourceContentType = sourceAndContent.v1();
|
||||
String routing = getResult.getFields().containsKey(RoutingFieldMapper.NAME) ? getResult.field(RoutingFieldMapper.NAME).getValue().toString() : null;
|
||||
|
@ -161,7 +161,7 @@ public class UpdateHelper extends AbstractComponent {
|
|||
if (request.script() == null && request.doc() != null) {
|
||||
IndexRequest indexRequest = request.doc();
|
||||
updatedSourceAsMap = sourceAndContent.v2();
|
||||
if (indexRequest.ttl() > 0) {
|
||||
if (indexRequest.ttl() != null) {
|
||||
ttl = indexRequest.ttl();
|
||||
}
|
||||
timestamp = indexRequest.timestamp();
|
||||
|
@ -212,9 +212,9 @@ public class UpdateHelper extends AbstractComponent {
|
|||
// apply script to update the source
|
||||
// No TTL has been given in the update script so we keep previous TTL value if there is one
|
||||
if (ttl == null) {
|
||||
ttl = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
|
||||
if (ttl != null) {
|
||||
ttl = ttl - TimeValue.nsecToMSec(System.nanoTime() - getDateNS); // It is an approximation of exact TTL value, could be improved
|
||||
Long ttlAsLong = getResult.getFields().containsKey(TTLFieldMapper.NAME) ? (Long) getResult.field(TTLFieldMapper.NAME).getValue() : null;
|
||||
if (ttlAsLong != null) {
|
||||
ttl = new TimeValue(ttlAsLong - TimeValue.nsecToMSec(System.nanoTime() - getDateNS));// It is an approximation of exact TTL value, could be improved
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -257,17 +257,15 @@ public class UpdateHelper extends AbstractComponent {
|
|||
return ctx;
|
||||
}
|
||||
|
||||
private Long getTTLFromScriptContext(Map<String, Object> ctx) {
|
||||
Long ttl = null;
|
||||
private TimeValue getTTLFromScriptContext(Map<String, Object> ctx) {
|
||||
Object fetchedTTL = ctx.get("_ttl");
|
||||
if (fetchedTTL != null) {
|
||||
if (fetchedTTL instanceof Number) {
|
||||
ttl = ((Number) fetchedTTL).longValue();
|
||||
} else {
|
||||
ttl = TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl").millis();
|
||||
return new TimeValue(((Number) fetchedTTL).longValue());
|
||||
}
|
||||
return TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl");
|
||||
}
|
||||
return ttl;
|
||||
return null;
|
||||
}
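getTTLFromScriptContext now hands back a TimeValue directly: a Number in ctx._ttl is treated as milliseconds, while a String is parsed as a time-value expression. The same conversion as a standalone sketch (class and method names are invented):

----------------------------
import org.elasticsearch.common.unit.TimeValue;

import java.util.Map;

// Standalone version of the conversion above; the behaviour mirrors the new method.
public class ScriptTtlSketch {
    static TimeValue ttlFromScriptContext(Map<String, Object> ctx) {
        Object fetchedTTL = ctx.get("_ttl");
        if (fetchedTTL instanceof Number) {
            return new TimeValue(((Number) fetchedTTL).longValue()); // milliseconds
        }
        if (fetchedTTL instanceof String) {
            return TimeValue.parseTimeValue((String) fetchedTTL, null, "_ttl"); // e.g. "2h", "30m"
        }
        return null; // no _ttl provided by the script
    }
}
----------------------------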
|
||||
|
||||
/**
|
||||
|
@ -338,13 +336,10 @@ public class UpdateHelper extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
|
||||
public static enum Operation {
|
||||
|
||||
public enum Operation {
|
||||
UPSERT,
|
||||
INDEX,
|
||||
DELETE,
|
||||
NONE
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@ import org.elasticsearch.action.index.IndexRequest;
|
|||
import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
|
@ -325,7 +326,7 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
|
|||
}
|
||||
|
||||
/**
|
||||
* Set the new ttl of the document. Note that if detectNoop is true (the default)
|
||||
* Set the new ttl of the document as a long. Note that if detectNoop is true (the default)
|
||||
* and the source of the document isn't changed then the ttl update won't take
|
||||
* effect.
|
||||
*/
|
||||
|
@ -333,4 +334,24 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
|
|||
request.doc().ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the new ttl of the document as a time value expression. Note that if detectNoop is true (the default)
|
||||
* and the source of the document isn't changed then the ttl update won't take
|
||||
* effect.
|
||||
*/
|
||||
public UpdateRequestBuilder setTtl(String ttl) {
|
||||
request.doc().ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the new ttl of the document as a {@link TimeValue} instance. Note that if detectNoop is true (the default)
|
||||
* and the source of the document isn't changed then the ttl update won't take
|
||||
* effect.
|
||||
*/
|
||||
public UpdateRequestBuilder setTtl(TimeValue ttl) {
|
||||
request.doc().ttl(ttl);
|
||||
return this;
|
||||
}
|
||||
}
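As with the index request, the new update overloads just forward the TTL to the underlying doc. A short usage sketch, assuming an existing Client and invented index/type/id values:

----------------------------
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;

// Sketch only: `client`, "logs", "event" and "1" are placeholders.
public class TtlUpdateSketch {
    static void updateWithTtl(Client client) {
        client.prepareUpdate("logs", "event", "1")
                .setDoc("{\"msg\":\"updated\"}")
                .setTtl(TimeValue.timeValueMinutes(30)) // TimeValue overload added above
                .get();
    }
}
----------------------------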
|
||||
|
|
|
@ -66,6 +66,7 @@ final class JNACLibrary {
|
|||
}
|
||||
|
||||
static native int getrlimit(int resource, Rlimit rlimit);
|
||||
static native int setrlimit(int resource, Rlimit rlimit);
|
||||
|
||||
static native String strerror(int errno);
|
||||
|
||||
|
|
|
@ -217,4 +217,88 @@ final class JNAKernel32Library {
|
|||
* @return true if the function succeeds.
|
||||
*/
|
||||
native boolean CloseHandle(Pointer handle);
|
||||
|
||||
/**
|
||||
* Creates or opens a new job object
|
||||
*
|
||||
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx
|
||||
*
|
||||
* @param jobAttributes security attributes
|
||||
* @param name job name
|
||||
* @return job handle if the function succeeds
|
||||
*/
|
||||
native Pointer CreateJobObjectW(Pointer jobAttributes, String name);
|
||||
|
||||
/**
|
||||
* Associates a process with an existing job
|
||||
*
|
||||
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx
|
||||
*
|
||||
* @param job job handle
|
||||
* @param process process handle
|
||||
* @return true if the function succeeds
|
||||
*/
|
||||
native boolean AssignProcessToJobObject(Pointer job, Pointer process);
|
||||
|
||||
/**
|
||||
* Basic limit information for a job object
|
||||
*
|
||||
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx
|
||||
*/
|
||||
public static class JOBOBJECT_BASIC_LIMIT_INFORMATION extends Structure implements Structure.ByReference {
|
||||
public long PerProcessUserTimeLimit;
|
||||
public long PerJobUserTimeLimit;
|
||||
public int LimitFlags;
|
||||
public SizeT MinimumWorkingSetSize;
|
||||
public SizeT MaximumWorkingSetSize;
|
||||
public int ActiveProcessLimit;
|
||||
public Pointer Affinity;
|
||||
public int PriorityClass;
|
||||
public int SchedulingClass;
|
||||
|
||||
@Override
|
||||
protected List<String> getFieldOrder() {
|
||||
return Arrays.asList(new String[] {
|
||||
"PerProcessUserTimeLimit", "PerJobUserTimeLimit", "LimitFlags", "MinimumWorkingSetSize",
|
||||
"MaximumWorkingSetSize", "ActiveProcessLimit", "Affinity", "PriorityClass", "SchedulingClass"
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Constant for JOBOBJECT_BASIC_LIMIT_INFORMATION in Query/Set InformationJobObject
|
||||
*/
|
||||
static final int JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS = 2;
|
||||
|
||||
/**
|
||||
* Constant for LimitFlags, indicating a process limit has been set
|
||||
*/
|
||||
static final int JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8;
|
||||
|
||||
/**
|
||||
* Get job limit and state information
|
||||
*
|
||||
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx
|
||||
*
|
||||
* @param job job handle
|
||||
* @param infoClass information class constant
|
||||
* @param info pointer to information structure
|
||||
* @param infoLength size of information structure
|
||||
* @param returnLength length of data written back to structure (or null if not wanted)
|
||||
* @return true if the function succeeds
|
||||
*/
|
||||
native boolean QueryInformationJobObject(Pointer job, int infoClass, Pointer info, int infoLength, Pointer returnLength);
|
||||
|
||||
/**
|
||||
* Set job limit and state information
|
||||
*
|
||||
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx
|
||||
*
|
||||
* @param job job handle
|
||||
* @param infoClass information class constant
|
||||
* @param info pointer to information structure
|
||||
* @param infoLength size of information structure
|
||||
* @return true if the function succeeds
|
||||
*/
|
||||
native boolean SetInformationJobObject(Pointer job, int infoClass, Pointer info, int infoLength);
|
||||
}
|
||||
|
|
|
@ -191,7 +191,7 @@ class JNANatives {
|
|||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("unable to install syscall filter", t);
|
||||
}
|
||||
logger.warn("unable to install syscall filter: " + t.getMessage());
|
||||
logger.warn("unable to install syscall filter: ", t);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -47,7 +47,7 @@ import java.util.Map;
|
|||
* Installs a limited form of secure computing mode,
|
||||
* to filter system calls in order to block process execution.
|
||||
* <p>
|
||||
* This is only supported on the Linux, Solaris, and Mac OS X operating systems.
|
||||
* This is supported on Linux, Solaris, FreeBSD, OpenBSD, Mac OS X, and Windows.
|
||||
* <p>
|
||||
* On Linux it currently supports amd64 and i386 architectures, requires Linux kernel 3.5 or above, and requires
|
||||
* {@code CONFIG_SECCOMP} and {@code CONFIG_SECCOMP_FILTER} compiled into the kernel.
|
||||
|
@ -71,6 +71,8 @@ import java.util.Map;
|
|||
* <li>{@code PRIV_PROC_EXEC}</li>
|
||||
* </ul>
|
||||
* <p>
|
||||
* On BSD systems, process creation is restricted with {@code setrlimit(RLIMIT_NPROC)}.
|
||||
* <p>
|
||||
* On Mac OS X Leopard or above, a custom {@code sandbox(7)} ("Seatbelt") profile is installed that
|
||||
* denies the following rules:
|
||||
* <ul>
|
||||
|
@ -78,6 +80,8 @@ import java.util.Map;
|
|||
* <li>{@code process-exec}</li>
|
||||
* </ul>
|
||||
* <p>
|
||||
* On Windows, process creation is restricted with {@code SetInformationJobObject/ActiveProcessLimit}.
|
||||
* <p>
|
||||
* This is not intended as a sandbox. It is another level of security, mostly intended to annoy
|
||||
* security researchers and make their lives more difficult in achieving "remote execution" exploits.
|
||||
* @see <a href="http://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt">
|
||||
|
@ -327,7 +331,8 @@ final class Seccomp {
|
|||
case 1: break; // already set by caller
|
||||
default:
|
||||
int errno = Native.getLastError();
|
||||
if (errno == ENOSYS) {
|
||||
if (errno == EINVAL) {
|
||||
// friendly error, this will be the typical case for an old kernel
|
||||
throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
|
||||
} else {
|
||||
throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno));
|
||||
|
@ -534,6 +539,73 @@ final class Seccomp {
|
|||
logger.debug("Solaris priv_set initialization successful");
|
||||
}
|
||||
|
||||
// BSD implementation via setrlimit(2)
|
||||
|
||||
// TODO: add OpenBSD to Lucene Constants
|
||||
// TODO: JNA doesn't have netbsd support, but this mechanism should work there too.
|
||||
static final boolean OPENBSD = Constants.OS_NAME.startsWith("OpenBSD");
|
||||
|
||||
// not a standard limit, means something different on linux, etc!
|
||||
static final int RLIMIT_NPROC = 7;
|
||||
|
||||
static void bsdImpl() {
|
||||
boolean supported = Constants.FREE_BSD || OPENBSD || Constants.MAC_OS_X;
|
||||
if (supported == false) {
|
||||
throw new IllegalStateException("bug: should not be trying to initialize RLIMIT_NPROC for an unsupported OS");
|
||||
}
|
||||
|
||||
JNACLibrary.Rlimit limit = new JNACLibrary.Rlimit();
|
||||
limit.rlim_cur.setValue(0);
|
||||
limit.rlim_max.setValue(0);
|
||||
if (JNACLibrary.setrlimit(RLIMIT_NPROC, limit) != 0) {
|
||||
throw new UnsupportedOperationException("RLIMIT_NPROC unavailable: " + JNACLibrary.strerror(Native.getLastError()));
|
||||
}
|
||||
|
||||
logger.debug("BSD RLIMIT_NPROC initialization successful");
|
||||
}
|
||||
|
||||
// windows impl via job ActiveProcessLimit
|
||||
|
||||
static void windowsImpl() {
|
||||
if (!Constants.WINDOWS) {
|
||||
throw new IllegalStateException("bug: should not be trying to initialize ActiveProcessLimit for an unsupported OS");
|
||||
}
|
||||
|
||||
JNAKernel32Library lib = JNAKernel32Library.getInstance();
|
||||
|
||||
// create a new Job
|
||||
Pointer job = lib.CreateJobObjectW(null, null);
|
||||
if (job == null) {
|
||||
throw new UnsupportedOperationException("CreateJobObject: " + Native.getLastError());
|
||||
}
|
||||
|
||||
try {
|
||||
// retrieve the current basic limits of the job
|
||||
int clazz = JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION_CLASS;
|
||||
JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION limits = new JNAKernel32Library.JOBOBJECT_BASIC_LIMIT_INFORMATION();
|
||||
limits.write();
|
||||
if (!lib.QueryInformationJobObject(job, clazz, limits.getPointer(), limits.size(), null)) {
|
||||
throw new UnsupportedOperationException("QueryInformationJobObject: " + Native.getLastError());
|
||||
}
|
||||
limits.read();
|
||||
// modify the number of active processes to be 1 (exactly the one process we will add to the job).
|
||||
limits.ActiveProcessLimit = 1;
|
||||
limits.LimitFlags = JNAKernel32Library.JOB_OBJECT_LIMIT_ACTIVE_PROCESS;
|
||||
limits.write();
|
||||
if (!lib.SetInformationJobObject(job, clazz, limits.getPointer(), limits.size())) {
|
||||
throw new UnsupportedOperationException("SetInformationJobObject: " + Native.getLastError());
|
||||
}
|
||||
// assign ourselves to the job
|
||||
if (!lib.AssignProcessToJobObject(job, lib.GetCurrentProcess())) {
|
||||
throw new UnsupportedOperationException("AssignProcessToJobObject: " + Native.getLastError());
|
||||
}
|
||||
} finally {
|
||||
lib.CloseHandle(job);
|
||||
}
|
||||
|
||||
logger.debug("Windows ActiveProcessLimit initialization successful");
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to drop the capability to execute for the process.
|
||||
* <p>
|
||||
|
@ -544,11 +616,19 @@ final class Seccomp {
|
|||
if (Constants.LINUX) {
|
||||
return linuxImpl();
|
||||
} else if (Constants.MAC_OS_X) {
|
||||
// try to enable both mechanisms if possible
|
||||
bsdImpl();
|
||||
macImpl(tmpFile);
|
||||
return 1;
|
||||
} else if (Constants.SUN_OS) {
|
||||
solarisImpl();
|
||||
return 1;
|
||||
} else if (Constants.FREE_BSD || OPENBSD) {
|
||||
bsdImpl();
|
||||
return 1;
|
||||
} else if (Constants.WINDOWS) {
|
||||
windowsImpl();
|
||||
return 1;
|
||||
} else {
|
||||
throw new UnsupportedOperationException("syscall filtering not supported for OS: '" + Constants.OS_NAME + "'");
|
||||
}
|
||||
|
|
|
@ -131,34 +131,48 @@ final class Security {
|
|||
@SuppressForbidden(reason = "proper use of URL")
|
||||
static Map<String,Policy> getPluginPermissions(Environment environment) throws IOException, NoSuchAlgorithmException {
|
||||
Map<String,Policy> map = new HashMap<>();
|
||||
// collect up lists of plugins and modules
|
||||
List<Path> pluginsAndModules = new ArrayList<>();
|
||||
if (Files.exists(environment.pluginsFile())) {
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.pluginsFile())) {
|
||||
for (Path plugin : stream) {
|
||||
Path policyFile = plugin.resolve(PluginInfo.ES_PLUGIN_POLICY);
|
||||
if (Files.exists(policyFile)) {
|
||||
// first get a list of URLs for the plugins' jars:
|
||||
// we resolve symlinks so the map is keyed on the normalized codebase name
|
||||
List<URL> codebases = new ArrayList<>();
|
||||
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
|
||||
for (Path jar : jarStream) {
|
||||
codebases.add(jar.toRealPath().toUri().toURL());
|
||||
}
|
||||
}
|
||||
|
||||
// parse the plugin's policy file into a set of permissions
|
||||
Policy policy = readPolicy(policyFile.toUri().toURL(), codebases.toArray(new URL[codebases.size()]));
|
||||
|
||||
// consult this policy for each of the plugin's jars:
|
||||
for (URL url : codebases) {
|
||||
if (map.put(url.getFile(), policy) != null) {
|
||||
// just be paranoid ok?
|
||||
throw new IllegalStateException("per-plugin permissions already granted for jar file: " + url);
|
||||
}
|
||||
}
|
||||
pluginsAndModules.add(plugin);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (Files.exists(environment.modulesFile())) {
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.modulesFile())) {
|
||||
for (Path plugin : stream) {
|
||||
pluginsAndModules.add(plugin);
|
||||
}
|
||||
}
|
||||
}
|
||||
// now process each one
|
||||
for (Path plugin : pluginsAndModules) {
|
||||
Path policyFile = plugin.resolve(PluginInfo.ES_PLUGIN_POLICY);
|
||||
if (Files.exists(policyFile)) {
|
||||
// first get a list of URLs for the plugins' jars:
|
||||
// we resolve symlinks so map is keyed on the normalize codebase name
|
||||
List<URL> codebases = new ArrayList<>();
|
||||
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
|
||||
for (Path jar : jarStream) {
|
||||
codebases.add(jar.toRealPath().toUri().toURL());
|
||||
}
|
||||
}
|
||||
|
||||
// parse the plugin's policy file into a set of permissions
|
||||
Policy policy = readPolicy(policyFile.toUri().toURL(), codebases.toArray(new URL[codebases.size()]));
|
||||
|
||||
// consult this policy for each of the plugin's jars:
|
||||
for (URL url : codebases) {
|
||||
if (map.put(url.getFile(), policy) != null) {
|
||||
// just be paranoid ok?
|
||||
throw new IllegalStateException("per-plugin permissions already granted for jar file: " + url);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Collections.unmodifiableMap(map);
|
||||
}
|
||||
|
||||
|
@ -228,6 +242,7 @@ final class Security {
|
|||
// read-only dirs
|
||||
addPath(policy, "path.home", environment.binFile(), "read,readlink");
|
||||
addPath(policy, "path.home", environment.libFile(), "read,readlink");
|
||||
addPath(policy, "path.home", environment.modulesFile(), "read,readlink");
|
||||
addPath(policy, "path.plugins", environment.pluginsFile(), "read,readlink");
|
||||
addPath(policy, "path.conf", environment.configFile(), "read,readlink");
|
||||
addPath(policy, "path.scripts", environment.scriptsFile(), "read,readlink");
|
||||
|
|
|
@ -38,7 +38,9 @@ import org.elasticsearch.common.inject.Injector;
|
|||
import org.elasticsearch.common.inject.Module;
|
||||
import org.elasticsearch.common.inject.ModulesBuilder;
|
||||
import org.elasticsearch.common.network.NetworkModule;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsFilter;
|
||||
import org.elasticsearch.common.settings.SettingsModule;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.env.Environment;
|
||||
|
@ -46,6 +48,7 @@ import org.elasticsearch.env.EnvironmentModule;
|
|||
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
|
||||
import org.elasticsearch.monitor.MonitorService;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
import org.elasticsearch.node.settings.NodeSettingsService;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.plugins.PluginsModule;
|
||||
import org.elasticsearch.plugins.PluginsService;
|
||||
|
@ -122,13 +125,14 @@ public class TransportClient extends AbstractClient {
|
|||
.put(CLIENT_TYPE_SETTING, CLIENT_TYPE)
|
||||
.build();
|
||||
|
||||
PluginsService pluginsService = new PluginsService(settings, null, pluginClasses);
|
||||
PluginsService pluginsService = new PluginsService(settings, null, null, pluginClasses);
|
||||
this.settings = pluginsService.updatedSettings();
|
||||
|
||||
Version version = Version.CURRENT;
|
||||
|
||||
final ThreadPool threadPool = new ThreadPool(settings);
|
||||
|
||||
final NetworkService networkService = new NetworkService(settings);
|
||||
final SettingsFilter settingsFilter = new SettingsFilter(settings);
|
||||
boolean success = false;
|
||||
try {
|
||||
ModulesBuilder modules = new ModulesBuilder();
|
||||
|
@ -138,8 +142,8 @@ public class TransportClient extends AbstractClient {
|
|||
modules.add(pluginModule);
|
||||
}
|
||||
modules.add(new PluginsModule(pluginsService));
|
||||
modules.add(new SettingsModule(this.settings));
|
||||
modules.add(new NetworkModule());
|
||||
modules.add(new SettingsModule(this.settings, settingsFilter ));
|
||||
modules.add(new NetworkModule(networkService));
|
||||
modules.add(new ClusterNameModule(this.settings));
|
||||
modules.add(new ThreadPoolModule(threadPool));
|
||||
modules.add(new TransportModule(this.settings));
|
||||
|
|
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
public interface AckedClusterStateTaskListener extends ClusterStateTaskListener {
|
||||
|
||||
/**
|
||||
* Called to determine which nodes the acknowledgement is expected from
|
||||
*
|
||||
* @param discoveryNode a node
|
||||
* @return true if the node is expected to send ack back, false otherwise
|
||||
*/
|
||||
boolean mustAck(DiscoveryNode discoveryNode);
|
||||
|
||||
/**
|
||||
* Called once all the nodes have acknowledged the cluster state update request. Must be
|
||||
* very lightweight execution, since it gets executed on the cluster service thread.
|
||||
*
|
||||
* @param t optional error that might have been thrown
|
||||
*/
|
||||
void onAllNodesAcked(@Nullable Throwable t);
|
||||
|
||||
/**
|
||||
* Called once the acknowledgement timeout defined by
|
||||
* {@link AckedClusterStateUpdateTask#ackTimeout()} has expired
|
||||
*/
|
||||
void onAckTimeout();
|
||||
|
||||
/**
|
||||
* Acknowledgement timeout, maximum time interval to wait for acknowledgements
|
||||
*/
|
||||
TimeValue ackTimeout();
|
||||
|
||||
}
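A bare-bones implementation of the new interface might look like the sketch below; the 30 second timeout and the choice to require acks from every node are illustrative, and the empty bodies would normally do real work:

----------------------------
import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;

// Minimal sketch: ack from all nodes, fixed 30s ack timeout, no-op callbacks.
class AllNodesAckListener implements AckedClusterStateTaskListener {
    @Override
    public boolean mustAck(DiscoveryNode discoveryNode) {
        return true; // expect an ack from every node
    }

    @Override
    public void onAllNodesAcked(@Nullable Throwable t) {
        // runs on the cluster service thread, so keep it lightweight
    }

    @Override
    public void onAckTimeout() {
        // not all acks arrived within ackTimeout()
    }

    @Override
    public TimeValue ackTimeout() {
        return TimeValue.timeValueSeconds(30);
    }

    @Override
    public void onFailure(String source, Throwable t) {
        // inherited from ClusterStateTaskListener
    }
}
----------------------------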
|
|
@ -22,18 +22,24 @@ import org.elasticsearch.action.ActionListener;
|
|||
import org.elasticsearch.cluster.ack.AckedRequest;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
/**
|
||||
* An extension interface to {@link ClusterStateUpdateTask} that allows to be notified when
|
||||
* all the nodes have acknowledged a cluster state update request
|
||||
*/
|
||||
public abstract class AckedClusterStateUpdateTask<Response> extends ClusterStateUpdateTask {
|
||||
public abstract class AckedClusterStateUpdateTask<Response> extends ClusterStateUpdateTask implements AckedClusterStateTaskListener {
|
||||
|
||||
private final ActionListener<Response> listener;
|
||||
private final AckedRequest request;
|
||||
|
||||
protected AckedClusterStateUpdateTask(AckedRequest request, ActionListener<Response> listener) {
|
||||
this(Priority.NORMAL, request, listener);
|
||||
}
|
||||
|
||||
protected AckedClusterStateUpdateTask(Priority priority, AckedRequest request, ActionListener<Response> listener) {
|
||||
super(priority);
|
||||
this.listener = listener;
|
||||
this.request = request;
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.cluster.routing.OperationRouting;
|
||||
import org.elasticsearch.cluster.service.PendingClusterTask;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
|
@ -101,12 +100,35 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
|
|||
void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener);
|
||||
|
||||
/**
|
||||
* Submits a task that will update the cluster state.
|
||||
* Submits a cluster state update task; submitted updates will be
|
||||
* batched across the same instance of executor. The exact batching
|
||||
* semantics depend on the underlying implementation but a rough
|
||||
* guideline is that if the update task is submitted while there
|
||||
* are pending update tasks for the same executor, these update
|
||||
* tasks will all be executed on the executor in a single batch
|
||||
*
|
||||
* @param source the source of the cluster state update task
|
||||
* @param task the state needed for the cluster state update task
|
||||
* @param config the cluster state update task configuration
|
||||
* @param executor the cluster state update task executor; tasks
|
||||
* that share the same executor will be executed
|
||||
* in batches on this executor
|
||||
* @param listener callback after the cluster state update task
|
||||
* completes
|
||||
* @param <T> the type of the cluster state update task state
|
||||
*/
|
||||
void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask);
|
||||
<T> void submitStateUpdateTask(final String source, final T task,
|
||||
final ClusterStateTaskConfig config,
|
||||
final ClusterStateTaskExecutor<T> executor,
|
||||
final ClusterStateTaskListener listener);
|
||||
|
||||
/**
|
||||
* Submits a task that will update the cluster state (the task has a default priority of {@link Priority#NORMAL}).
|
||||
* Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener)},
|
||||
* submitted updates will not be batched.
|
||||
*
|
||||
* @param source the source of the cluster state update task
|
||||
* @param updateTask the full context for the cluster state update
|
||||
* task
|
||||
*/
|
||||
void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask);
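Tying the new pieces together, a batched submission might look roughly like the sketch below; RefreshTask, refreshExecutor and the source string are invented, and only the submitStateUpdateTask signature comes from this interface:

----------------------------
// Hypothetical batched submission; only the method signature is taken from ClusterService.
RefreshTask task = new RefreshTask();
clusterService.submitStateUpdateTask(
        "refresh-settings",                          // source
        task,                                        // per-task state
        ClusterStateTaskConfig.build(Priority.NORMAL),
        refreshExecutor,                             // shared ClusterStateTaskExecutor<RefreshTask>
        new ClusterStateTaskListener() {
            @Override
            public void onFailure(String source, Throwable t) {
                // react to the task being rejected or failing
            }
        });
----------------------------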
|
||||
|
||||
|
|
|
@ -19,9 +19,9 @@
|
|||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
|
@ -475,6 +475,17 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
|||
}
|
||||
builder.endObject();
|
||||
|
||||
builder.startObject(IndexMetaData.KEY_ACTIVE_ALLOCATIONS);
|
||||
for (IntObjectCursor<Set<String>> cursor : indexMetaData.getActiveAllocationIds()) {
|
||||
builder.startArray(String.valueOf(cursor.key));
|
||||
for (String allocationId : cursor.value) {
|
||||
builder.value(allocationId);
|
||||
}
|
||||
builder.endArray();
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
// index metadata
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
|
@ -766,7 +777,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
|||
nodes = after.nodes.diff(before.nodes);
|
||||
metaData = after.metaData.diff(before.metaData);
|
||||
blocks = after.blocks.diff(before.blocks);
|
||||
customs = DiffableUtils.diff(before.customs, after.customs);
|
||||
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
|
||||
}
|
||||
|
||||
public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException {
|
||||
|
@ -778,17 +789,18 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
|||
nodes = proto.nodes.readDiffFrom(in);
|
||||
metaData = proto.metaData.readDiffFrom(in);
|
||||
blocks = proto.blocks.readDiffFrom(in);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() {
|
||||
@Override
|
||||
public Custom readFrom(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readFrom(in);
|
||||
}
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
|
||||
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
|
||||
@Override
|
||||
public Custom read(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readDiffFrom(in);
|
||||
}
|
||||
});
|
||||
@Override
|
||||
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readDiffFrom(in);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -0,0 +1,92 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
/**
|
||||
* Cluster state update task configuration for timeout and priority
|
||||
*/
|
||||
public interface ClusterStateTaskConfig {
|
||||
/**
|
||||
* The timeout for this cluster state update task configuration. If
|
||||
* the cluster state update task isn't processed within this
|
||||
* timeout, the associated {@link ClusterStateTaskListener#onFailure(String, Throwable)}
|
||||
* is invoked.
|
||||
*
|
||||
* @return the timeout, or null if one is not set
|
||||
*/
|
||||
@Nullable
|
||||
TimeValue timeout();
|
||||
|
||||
/**
|
||||
* The {@link Priority} for this cluster state update task configuration.
|
||||
*
|
||||
* @return the priority
|
||||
*/
|
||||
Priority priority();
|
||||
|
||||
/**
|
||||
* Build a cluster state update task configuration with the
|
||||
* specified {@link Priority} and no timeout.
|
||||
*
|
||||
* @param priority the priority for the associated cluster state
|
||||
* update task
|
||||
* @return the resulting cluster state update task configuration
|
||||
*/
|
||||
static ClusterStateTaskConfig build(Priority priority) {
|
||||
return new Basic(priority, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a cluster state update task configuration with the
|
||||
* specified {@link Priority} and timeout.
|
||||
*
|
||||
* @param priority the priority for the associated cluster state
|
||||
* update task
|
||||
* @param timeout the timeout for the associated cluster state
|
||||
* update task
|
||||
* @return the result cluster state update task configuration
|
||||
*/
|
||||
static ClusterStateTaskConfig build(Priority priority, TimeValue timeout) {
|
||||
return new Basic(priority, timeout);
|
||||
}
|
||||
|
||||
class Basic implements ClusterStateTaskConfig {
|
||||
final TimeValue timeout;
|
||||
final Priority priority;
|
||||
|
||||
public Basic(Priority priority, TimeValue timeout) {
|
||||
this.timeout = timeout;
|
||||
this.priority = priority;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
return timeout;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Priority priority() {
|
||||
return priority;
|
||||
}
|
||||
}
|
||||
}
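For example, a configuration with urgent priority and a 30 second timeout could be built as follows (values illustrative):

----------------------------
ClusterStateTaskConfig config = ClusterStateTaskConfig.build(Priority.URGENT, TimeValue.timeValueSeconds(30));
----------------------------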
|
|
@ -0,0 +1,132 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
public interface ClusterStateTaskExecutor<T> {
|
||||
/**
|
||||
* Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state
|
||||
* should be changed.
|
||||
*/
|
||||
BatchResult<T> execute(ClusterState currentState, List<T> tasks) throws Exception;
|
||||
|
||||
/**
|
||||
* indicates whether this task should only run if current node is master
|
||||
*/
|
||||
default boolean runOnlyOnMaster() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents the result of a batched execution of cluster state update tasks
|
||||
* @param <T> the type of the cluster state update task
|
||||
*/
|
||||
class BatchResult<T> {
|
||||
final public ClusterState resultingState;
|
||||
final public Map<T, TaskResult> executionResults;
|
||||
|
||||
/**
|
||||
* Construct an execution result instance with a correspondence between the tasks and their execution result
|
||||
* @param resultingState the resulting cluster state
|
||||
* @param executionResults the correspondence between tasks and their outcome
|
||||
*/
|
||||
BatchResult(ClusterState resultingState, Map<T, TaskResult> executionResults) {
|
||||
this.resultingState = resultingState;
|
||||
this.executionResults = executionResults;
|
||||
}
|
||||
|
||||
public static <T> Builder<T> builder() {
|
||||
return new Builder<>();
|
||||
}
|
||||
|
||||
public static class Builder<T> {
|
||||
private final Map<T, TaskResult> executionResults = new IdentityHashMap<>();
|
||||
|
||||
public Builder<T> success(T task) {
|
||||
return result(task, TaskResult.success());
|
||||
}
|
||||
|
||||
public Builder<T> successes(Iterable<T> tasks) {
|
||||
for (T task : tasks) {
|
||||
success(task);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder<T> failure(T task, Throwable t) {
|
||||
return result(task, TaskResult.failure(t));
|
||||
}
|
||||
|
||||
public Builder<T> failures(Iterable<T> tasks, Throwable t) {
|
||||
for (T task : tasks) {
|
||||
failure(task, t);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
private Builder<T> result(T task, TaskResult executionResult) {
|
||||
executionResults.put(task, executionResult);
|
||||
return this;
|
||||
}
|
||||
|
||||
public BatchResult<T> build(ClusterState resultingState) {
|
||||
return new BatchResult<>(resultingState, executionResults);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
final class TaskResult {
|
||||
private final Throwable failure;
|
||||
|
||||
private static final TaskResult SUCCESS = new TaskResult(null);
|
||||
|
||||
public static TaskResult success() {
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
public static TaskResult failure(Throwable failure) {
|
||||
return new TaskResult(failure);
|
||||
}
|
||||
|
||||
private TaskResult(Throwable failure) {
|
||||
this.failure = failure;
|
||||
}
|
||||
|
||||
public boolean isSuccess() {
|
||||
return failure == null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle the execution result with the provided consumers
|
||||
* @param onSuccess handler to invoke on success
|
||||
* @param onFailure handler to invoke on failure; the throwable passed through will not be null
|
||||
*/
|
||||
public void handle(Runnable onSuccess, Consumer<Throwable> onFailure) {
|
||||
if (failure == null) {
|
||||
onSuccess.run();
|
||||
} else {
|
||||
onFailure.accept(failure);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
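A sketch of a trivial executor built against this interface; the Task type and the decision to leave the state unchanged are placeholders for real per-task logic:

----------------------------
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;

import java.util.List;

class Task {} // placeholder task type for the sketch

// Applies no change and reports every task as successful, using the BatchResult builder above.
class NoopBatchExecutor implements ClusterStateTaskExecutor<Task> {
    @Override
    public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception {
        ClusterState newState = currentState; // a real executor would derive a new state here
        return BatchResult.<Task>builder().successes(tasks).build(newState);
    }
}
----------------------------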
|
|
@ -16,23 +16,28 @@
|
|||
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.cluster;

import java.util.List;

public interface ClusterStateTaskListener {

    /**
     * A callback called when execute fails.
     */
    void onFailure(String source, Throwable t);

    /**
     * called when the task was rejected because the local node is no longer master
     */
    default void onNoLongerMaster(String source) {
        onFailure(source, new NotMasterException("no longer master. source: [" + source + "]"));
    }

    /**
     * Called when the result of the {@link ClusterStateTaskExecutor#execute(ClusterState, List)} have been processed
     * properly by all listeners.
     */
    default void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
    }
}

package org.elasticsearch.test.rest;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.elasticsearch.test.rest.parser.RestTestParseException;

import java.io.IOException;

/** Rest API tests subset 0 */
public class Rest0IT extends ESRestTestCase {
    public Rest0IT(@Name("yaml") RestTestCandidate testCandidate) {
        super(testCandidate);

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
        return createParameters(0, 8);
@ -20,13 +20,31 @@
|
|||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* A task that can update the cluster state.
|
||||
*/
|
||||
abstract public class ClusterStateUpdateTask {
|
||||
abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<ClusterStateUpdateTask>, ClusterStateTaskListener {
|
||||
|
||||
final private Priority priority;
|
||||
|
||||
public ClusterStateUpdateTask() {
|
||||
this(Priority.NORMAL);
|
||||
}
|
||||
|
||||
public ClusterStateUpdateTask(Priority priority) {
|
||||
this.priority = priority;
|
||||
}
|
||||
|
||||
@Override
|
||||
final public BatchResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
|
||||
ClusterState result = execute(currentState);
|
||||
return BatchResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the cluster state based on the current state. Return the *same instance* if no state
|
||||
|
@ -39,28 +57,6 @@ abstract public class ClusterStateUpdateTask {
|
|||
*/
|
||||
abstract public void onFailure(String source, Throwable t);
|
||||
|
||||
|
||||
/**
|
||||
* indicates whether this task should only run if current node is master
|
||||
*/
|
||||
public boolean runOnlyOnMaster() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* called when the task was rejected because the local node is no longer master
|
||||
*/
|
||||
public void onNoLongerMaster(String source) {
|
||||
onFailure(source, new NotMasterException("no longer master. source: [" + source + "]"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when the result of the {@link #execute(ClusterState)} have been processed
|
||||
* properly by all listeners.
|
||||
*/
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
}
|
||||
|
||||
/**
|
||||
* If the cluster state update task wasn't processed by the provided timeout, call
|
||||
* {@link #onFailure(String, Throwable)}. May return null to indicate no timeout is needed (default).
|
||||
|
@ -70,5 +66,8 @@ abstract public class ClusterStateUpdateTask {
|
|||
return null;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Priority priority() {
|
||||
return priority;
|
||||
}
|
||||
}
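The non-batched path still goes through this class; a typical anonymous, no-op task might look like the following (clusterService and the source string are assumed for the sketch):

----------------------------
// Illustrative non-batched update; the task below returns the current state unchanged.
clusterService.submitStateUpdateTask("example-noop-update", new ClusterStateUpdateTask(Priority.NORMAL) {
    @Override
    public ClusterState execute(ClusterState currentState) {
        return currentState; // same instance, so no new cluster state is published
    }

    @Override
    public void onFailure(String source, Throwable t) {
        // react to the update being rejected or failing
    }
});
----------------------------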
|
||||
|
|
|
@ -29,7 +29,7 @@ import java.io.IOException;
|
|||
public interface Diff<T> {
|
||||
|
||||
/**
|
||||
* Applies difference to the specified part and retunrs the resulted part
|
||||
* Applies difference to the specified part and returns the resulted part
|
||||
*/
|
||||
T apply(T part);
|
||||
|
||||
|
|
|
@ -19,263 +19,630 @@
|
|||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.IntCursor;
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
public final class DiffableUtils {
|
||||
private DiffableUtils() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map key serializer for String keys
|
||||
*/
|
||||
public static KeySerializer<String> getStringKeySerializer() {
|
||||
return StringKeySerializer.INSTANCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map key serializer for Integer keys. Encodes as Int.
|
||||
*/
|
||||
public static KeySerializer<Integer> getIntKeySerializer() {
|
||||
return IntKeySerializer.INSTANCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a map key serializer for Integer keys. Encodes as VInt.
|
||||
*/
|
||||
public static KeySerializer<Integer> getVIntKeySerializer() {
|
||||
return VIntKeySerializer.INSTANCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenMaps of Diffable objects
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> diff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) {
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenMapDiff<>(before, after);
|
||||
return new ImmutableOpenMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenMaps of non-diffable objects
|
||||
*/
|
||||
public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> diff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenIntMaps of Diffable objects
|
||||
*/
|
||||
public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two ImmutableOpenIntMaps of non-diffable objects
|
||||
*/
|
||||
public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> diff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after, KeySerializer<Integer> keySerializer, NonDiffableValueSerializer<Integer, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two Maps of Diffable objects.
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<Map<String, T>> diff(Map<String, T> before, Map<String, T> after) {
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer) {
|
||||
assert after != null && before != null;
|
||||
return new JdkMapDiff<>(before, after);
|
||||
return new JdkMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance());
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates diff between two Maps of non-diffable objects
|
||||
*/
|
||||
public static <K, T> MapDiff<K, T, Map<K, T>> diff(Map<K, T> before, Map<K, T> after, KeySerializer<K> keySerializer, NonDiffableValueSerializer<K, T> valueSerializer) {
|
||||
assert after != null && before != null;
|
||||
return new JdkMapDiff<>(before, after, keySerializer, valueSerializer);
|
||||
}
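With key serializers now passed explicitly, computing a diff between two maps of Diffable values and applying it is a two-step operation. A hedged sketch (the generic T stands for any Diffable implementation; stream round-tripping is omitted):

----------------------------
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.cluster.DiffableUtils;

import java.util.Map;

// Sketch: diff two JDK maps of Diffable values using the String key serializer,
// then apply the diff to the old map to reproduce the new one.
public class MapDiffSketch {
    static <T extends Diffable<T>> Map<String, T> roundTrip(Map<String, T> before, Map<String, T> after) {
        Diff<Map<String, T>> diff = DiffableUtils.diff(before, after, DiffableUtils.getStringKeySerializer());
        return diff.apply(before);
    }
}
----------------------------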
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two ImmutableOpenMaps
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keyedReader);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two Maps.
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException {
|
||||
return new JdkMapDiff<>(in, keyedReader);
|
||||
public static <K, T> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two ImmutableOpenMaps
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, T proto) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, new PrototypeReader<>(proto));
|
||||
public static <T> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) throws IOException {
|
||||
return new ImmutableOpenIntMapDiff<>(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads an object that represents difference between two Maps.
|
||||
* Loads an object that represents difference between two Maps of Diffable objects
|
||||
*/
|
||||
public static <T extends Diffable<T>> Diff<Map<String, T>> readJdkMapDiff(StreamInput in, T proto) throws IOException {
|
||||
return new JdkMapDiff<>(in, new PrototypeReader<>(proto));
|
||||
public static <K, T> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
return new JdkMapDiff<>(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
/**
|
||||
* A reader that can deserialize an object. The reader can select the deserialization type based on the key. It's
|
||||
* used in custom metadata deserialization.
|
||||
* Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public interface KeyedReader<T> {
|
||||
|
||||
/**
|
||||
* reads an object of the type T from the stream input
|
||||
*/
|
||||
T readFrom(StreamInput in, String key) throws IOException;
|
||||
|
||||
/**
|
||||
* reads an object that represents differences between two objects of type T from the stream input
|
||||
*/
|
||||
Diff<T> readDiffFrom(StreamInput in, String key) throws IOException;
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, ImmutableOpenMap<K, T>> readImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
|
||||
return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
}
|
||||
|
||||
/**
|
||||
* Implementation of the KeyedReader that is using a prototype object for reading operations
|
||||
*
|
||||
* Note: this implementation is ignoring the key.
|
||||
* Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public static class PrototypeReader<T extends Diffable<T>> implements KeyedReader<T> {
|
||||
private T proto;
|
||||
|
||||
public PrototypeReader(T proto) {
|
||||
this.proto = proto;
|
||||
}
|
||||
|
||||
@Override
|
||||
public T readFrom(StreamInput in, String key) throws IOException {
|
||||
return proto.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<T> readDiffFrom(StreamInput in, String key) throws IOException {
|
||||
return proto.readDiffFrom(in);
|
||||
}
|
||||
public static <T extends Diffable<T>> MapDiff<Integer, T, ImmutableOpenIntMap<T>> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, T proto) throws IOException {
|
||||
return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two Maps of Diffable objects.
|
||||
* Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object
|
||||
*/
|
||||
public static <K, T extends Diffable<T>> MapDiff<K, T, Map<K, T>> readJdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, T proto) throws IOException {
|
||||
return new JdkMapDiff<>(in, keySerializer, new DiffablePrototypeValueReader<>(proto));
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two Maps of (possibly diffable) objects.
|
||||
*
|
||||
* @param <T> the diffable object
|
||||
*/
|
||||
private static class JdkMapDiff<T extends Diffable<T>> extends MapDiff<T, Map<String, T>> {
|
||||
private static class JdkMapDiff<K, T> extends MapDiff<K, T, Map<K, T>> {
|
||||
|
||||
protected JdkMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
|
||||
super(in, reader);
|
||||
protected JdkMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
super(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
public JdkMapDiff(Map<String, T> before, Map<String, T> after) {
|
||||
public JdkMapDiff(Map<K, T> before, Map<K, T> after,
|
||||
KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
super(keySerializer, valueSerializer);
|
||||
assert after != null && before != null;
|
||||
for (String key : before.keySet()) {
|
||||
|
||||
for (K key : before.keySet()) {
|
||||
if (!after.containsKey(key)) {
|
||||
deletes.add(key);
|
||||
}
|
||||
}
|
||||
for (Map.Entry<String, T> partIter : after.entrySet()) {
|
||||
|
||||
for (Map.Entry<K, T> partIter : after.entrySet()) {
|
||||
T beforePart = before.get(partIter.getKey());
|
||||
if (beforePart == null) {
|
||||
adds.put(partIter.getKey(), partIter.getValue());
|
||||
upserts.put(partIter.getKey(), partIter.getValue());
|
||||
} else if (partIter.getValue().equals(beforePart) == false) {
|
||||
diffs.put(partIter.getKey(), partIter.getValue().diff(beforePart));
|
||||
if (valueSerializer.supportsDiffableValues()) {
|
||||
diffs.put(partIter.getKey(), valueSerializer.diff(partIter.getValue(), beforePart));
|
||||
} else {
|
||||
upserts.put(partIter.getKey(), partIter.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, T> apply(Map<String, T> map) {
|
||||
Map<String, T> builder = new HashMap<>();
|
||||
public Map<K, T> apply(Map<K, T> map) {
|
||||
Map<K, T> builder = new HashMap<>();
|
||||
builder.putAll(map);
|
||||
|
||||
for (String part : deletes) {
|
||||
for (K part : deletes) {
|
||||
builder.remove(part);
|
||||
}
|
||||
|
||||
for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) {
|
||||
for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
|
||||
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
|
||||
}
|
||||
|
||||
for (Map.Entry<String, T> additon : adds.entrySet()) {
|
||||
builder.put(additon.getKey(), additon.getValue());
|
||||
for (Map.Entry<K, T> upsert : upserts.entrySet()) {
|
||||
builder.put(upsert.getKey(), upsert.getValue());
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two ImmutableOpenMap of diffable objects
|
||||
* Represents differences between two ImmutableOpenMap of (possibly diffable) objects
|
||||
*
|
||||
* @param <T> the diffable object
|
||||
* @param <T> the object type
|
||||
*/
|
||||
private static class ImmutableOpenMapDiff<T extends Diffable<T>> extends MapDiff<T, ImmutableOpenMap<String, T>> {
|
||||
private static class ImmutableOpenMapDiff<K, T> extends MapDiff<K, T, ImmutableOpenMap<K, T>> {
|
||||
|
||||
protected ImmutableOpenMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
|
||||
super(in, reader);
|
||||
protected ImmutableOpenMapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
|
||||
super(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
public ImmutableOpenMapDiff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) {
|
||||
public ImmutableOpenMapDiff(ImmutableOpenMap<K, T> before, ImmutableOpenMap<K, T> after,
|
||||
KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
|
||||
super(keySerializer, valueSerializer);
|
||||
assert after != null && before != null;
|
||||
for (ObjectCursor<String> key : before.keys()) {
|
||||
|
||||
for (ObjectCursor<K> key : before.keys()) {
|
||||
if (!after.containsKey(key.value)) {
|
||||
deletes.add(key.value);
|
||||
}
|
||||
}
|
||||
for (ObjectObjectCursor<String, T> partIter : after) {
|
||||
|
||||
for (ObjectObjectCursor<K, T> partIter : after) {
|
||||
T beforePart = before.get(partIter.key);
|
||||
if (beforePart == null) {
|
||||
adds.put(partIter.key, partIter.value);
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
} else if (partIter.value.equals(beforePart) == false) {
|
||||
diffs.put(partIter.key, partIter.value.diff(beforePart));
|
||||
if (valueSerializer.supportsDiffableValues()) {
|
||||
diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
|
||||
} else {
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImmutableOpenMap<String, T> apply(ImmutableOpenMap<String, T> map) {
|
||||
ImmutableOpenMap.Builder<String, T> builder = ImmutableOpenMap.builder();
|
||||
public ImmutableOpenMap<K, T> apply(ImmutableOpenMap<K, T> map) {
|
||||
ImmutableOpenMap.Builder<K, T> builder = ImmutableOpenMap.builder();
|
||||
builder.putAll(map);
|
||||
|
||||
for (String part : deletes) {
|
||||
for (K part : deletes) {
|
||||
builder.remove(part);
|
||||
}
|
||||
|
||||
for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) {
|
||||
for (Map.Entry<K, Diff<T>> diff : diffs.entrySet()) {
|
||||
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
|
||||
}
|
||||
|
||||
for (Map.Entry<String, T> additon : adds.entrySet()) {
|
||||
builder.put(additon.getKey(), additon.getValue());
|
||||
for (Map.Entry<K, T> upsert : upserts.entrySet()) {
|
||||
builder.put(upsert.getKey(), upsert.getValue());
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents differences between two maps of diffable objects
|
||||
* Represents differences between two ImmutableOpenIntMap of (possibly diffable) objects
|
||||
*
|
||||
* This class is used as base class for different map implementations
|
||||
*
|
||||
* @param <T> the diffable object
|
||||
* @param <T> the object type
|
||||
*/
|
||||
private static abstract class MapDiff<T extends Diffable<T>, M> implements Diff<M> {
|
||||
private static class ImmutableOpenIntMapDiff<T> extends MapDiff<Integer, T, ImmutableOpenIntMap<T>> {
|
||||
|
||||
protected final List<String> deletes;
|
||||
protected final Map<String, Diff<T>> diffs;
|
||||
protected final Map<String, T> adds;
|
||||
|
||||
protected MapDiff() {
|
||||
deletes = new ArrayList<>();
|
||||
diffs = new HashMap<>();
|
||||
adds = new HashMap<>();
|
||||
protected ImmutableOpenIntMapDiff(StreamInput in, KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) throws IOException {
|
||||
super(in, keySerializer, valueSerializer);
|
||||
}
|
||||
|
||||
protected MapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
|
||||
public ImmutableOpenIntMapDiff(ImmutableOpenIntMap<T> before, ImmutableOpenIntMap<T> after,
|
||||
KeySerializer<Integer> keySerializer, ValueSerializer<Integer, T> valueSerializer) {
|
||||
super(keySerializer, valueSerializer);
|
||||
assert after != null && before != null;
|
||||
|
||||
for (IntCursor key : before.keys()) {
|
||||
if (!after.containsKey(key.value)) {
|
||||
deletes.add(key.value);
|
||||
}
|
||||
}
|
||||
|
||||
for (IntObjectCursor<T> partIter : after) {
|
||||
T beforePart = before.get(partIter.key);
|
||||
if (beforePart == null) {
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
} else if (partIter.value.equals(beforePart) == false) {
|
||||
if (valueSerializer.supportsDiffableValues()) {
|
||||
diffs.put(partIter.key, valueSerializer.diff(partIter.value, beforePart));
|
||||
} else {
|
||||
upserts.put(partIter.key, partIter.value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImmutableOpenIntMap<T> apply(ImmutableOpenIntMap<T> map) {
|
||||
ImmutableOpenIntMap.Builder<T> builder = ImmutableOpenIntMap.builder();
|
||||
builder.putAll(map);
|
||||
|
||||
for (Integer part : deletes) {
|
||||
builder.remove(part);
|
||||
}
|
||||
|
||||
for (Map.Entry<Integer, Diff<T>> diff : diffs.entrySet()) {
|
||||
builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey())));
|
||||
}
|
||||
|
||||
for (Map.Entry<Integer, T> upsert : upserts.entrySet()) {
|
||||
builder.put(upsert.getKey(), upsert.getValue());
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
}

/**
 * Represents differences between two maps of objects and is used as base class for different map implementations.
 *
 * Implements serialization. How differences are applied is left to subclasses.
 *
 * @param <K> the type of map keys
 * @param <T> the type of map values
 * @param <M> the map implementation type
 */
public static abstract class MapDiff<K, T, M> implements Diff<M> {

    protected final List<K> deletes;
    protected final Map<K, Diff<T>> diffs; // incremental updates
    protected final Map<K, T> upserts; // additions or full updates
    protected final KeySerializer<K> keySerializer;
    protected final ValueSerializer<K, T> valueSerializer;

    protected MapDiff(KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) {
        this.keySerializer = keySerializer;
        this.valueSerializer = valueSerializer;
        deletes = new ArrayList<>();
        diffs = new HashMap<>();
        upserts = new HashMap<>();
    }

    protected MapDiff(StreamInput in, KeySerializer<K> keySerializer, ValueSerializer<K, T> valueSerializer) throws IOException {
        this.keySerializer = keySerializer;
        this.valueSerializer = valueSerializer;
        deletes = new ArrayList<>();
        diffs = new HashMap<>();
        upserts = new HashMap<>();
        int deletesCount = in.readVInt();
        for (int i = 0; i < deletesCount; i++) {
            deletes.add(keySerializer.readKey(in));
        }
        int diffsCount = in.readVInt();
        for (int i = 0; i < diffsCount; i++) {
            K key = keySerializer.readKey(in);
            Diff<T> diff = valueSerializer.readDiff(in, key);
            diffs.put(key, diff);
        }
        int upsertsCount = in.readVInt();
        for (int i = 0; i < upsertsCount; i++) {
            K key = keySerializer.readKey(in);
            T newValue = valueSerializer.read(in, key);
            upserts.put(key, newValue);
        }
    }

    /**
     * The keys that, when this diff is applied to a map, should be removed from the map.
     *
     * @return the list of keys that are deleted
     */
    public List<K> getDeletes() {
        return deletes;
    }

    /**
     * Map entries that, when this diff is applied to a map, should be
     * incrementally updated. The incremental update is represented using
     * the {@link Diff} interface.
     *
     * @return the map entries that are incrementally updated
     */
    public Map<K, Diff<T>> getDiffs() {
        return diffs;
    }

    /**
     * Map entries that, when this diff is applied to a map, should be
     * added to the map or fully replace the previous value.
     *
     * @return the map entries that are additions or full updates
     */
    public Map<K, T> getUpserts() {
        return upserts;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(deletes.size());
        for (K delete : deletes) {
            keySerializer.writeKey(delete, out);
        }
        out.writeVInt(diffs.size());
        for (Map.Entry<K, Diff<T>> entry : diffs.entrySet()) {
            keySerializer.writeKey(entry.getKey(), out);
            valueSerializer.writeDiff(entry.getValue(), out);
        }
        out.writeVInt(upserts.size());
        for (Map.Entry<K, T> entry : upserts.entrySet()) {
            keySerializer.writeKey(entry.getKey(), out);
            valueSerializer.write(entry.getValue(), out);
        }
    }
}
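
A minimal usage sketch of the map diffing above (illustrative only, not part of this change; the sample maps and the anonymous value serializer are assumptions): computing the diff between two String-keyed maps of plain Integer values and applying it to the old map.

    // values are not Diffable, so supply a NonDiffableValueSerializer (defined further down in this file)
    DiffableUtils.NonDiffableValueSerializer<String, Integer> intValues =
            new DiffableUtils.NonDiffableValueSerializer<String, Integer>() {
                @Override
                public void write(Integer value, StreamOutput out) throws IOException {
                    out.writeVInt(value);
                }

                @Override
                public Integer read(StreamInput in, String key) throws IOException {
                    return in.readVInt();
                }
            };

    Map<String, Integer> before = new HashMap<>();
    before.put("a", 1);
    before.put("b", 2);
    Map<String, Integer> after = new HashMap<>();
    after.put("b", 3);
    after.put("c", 4);

    // "a" is a delete, "b" and "c" are upserts (non-diffable values are always replaced in full)
    DiffableUtils.MapDiff<String, Integer, Map<String, Integer>> diff =
            DiffableUtils.diff(before, after, DiffableUtils.getStringKeySerializer(), intValues);
    assert diff.getDeletes().equals(Collections.singletonList("a"));
    assert diff.getDiffs().isEmpty();
    assert diff.getUpserts().keySet().equals(new HashSet<>(Arrays.asList("b", "c")));
    assert diff.apply(before).equals(after);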

/**
 * Provides read and write operations to serialize keys of map
 * @param <K> type of key
 */
public interface KeySerializer<K> {
    void writeKey(K key, StreamOutput out) throws IOException;
    K readKey(StreamInput in) throws IOException;
}
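
A custom key serializer only has to mirror writeKey and readKey. As a sketch (assumed, not part of this change), a serializer for Long keys could look like this:

    private static final class LongKeySerializer implements KeySerializer<Long> {
        @Override
        public void writeKey(Long key, StreamOutput out) throws IOException {
            out.writeLong(key);
        }

        @Override
        public Long readKey(StreamInput in) throws IOException {
            return in.readLong();
        }
    }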

/**
 * Serializes String keys of a map
 */
private static final class StringKeySerializer implements KeySerializer<String> {
    private static final StringKeySerializer INSTANCE = new StringKeySerializer();

    @Override
    public void writeKey(String key, StreamOutput out) throws IOException {
        out.writeString(key);
    }

    @Override
    public String readKey(StreamInput in) throws IOException {
        return in.readString();
    }
}

/**
 * Serializes Integer keys of a map as an Int
 */
private static final class IntKeySerializer implements KeySerializer<Integer> {
    public static final IntKeySerializer INSTANCE = new IntKeySerializer();

    @Override
    public void writeKey(Integer key, StreamOutput out) throws IOException {
        out.writeInt(key);
    }

    @Override
    public Integer readKey(StreamInput in) throws IOException {
        return in.readInt();
    }
}

/**
 * Serializes Integer keys of a map as a VInt. Requires keys to be positive.
 */
private static final class VIntKeySerializer implements KeySerializer<Integer> {
    public static final VIntKeySerializer INSTANCE = new VIntKeySerializer();

    @Override
    public void writeKey(Integer key, StreamOutput out) throws IOException {
        if (key < 0) {
            throw new IllegalArgumentException("Map key [" + key + "] must be positive");
        }
        out.writeVInt(key);
    }

    @Override
    public Integer readKey(StreamInput in) throws IOException {
        return in.readVInt();
    }
}

/**
 * Provides read and write operations to serialize map values.
 * Reading of values can be made dependent on map key.
 *
 * Also provides operations to distinguish whether map values are diffable.
 *
 * Should not be directly implemented, instead implement either
 * {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}.
 *
 * @param <K> key type of map
 * @param <V> value type of map
 */
public interface ValueSerializer<K, V> {

    /**
     * Writes value to stream
     */
    void write(V value, StreamOutput out) throws IOException;

    /**
     * Reads value from stream. Reading operation can be made dependent on map key.
     */
    V read(StreamInput in, K key) throws IOException;

    /**
     * Whether this serializer supports diffable values
     */
    boolean supportsDiffableValues();

    /**
     * Computes diff if this serializer supports diffable values
     */
    Diff<V> diff(V value, V beforePart);

    /**
     * Writes value as diff to stream if this serializer supports diffable values
     */
    void writeDiff(Diff<V> value, StreamOutput out) throws IOException;

    /**
     * Reads value as diff from stream if this serializer supports diffable values.
     * Reading operation can be made dependent on map key.
     */
    Diff<V> readDiff(StreamInput in, K key) throws IOException;
}

/**
 * Serializer for Diffable map values. Needs to implement read and readDiff methods.
 *
 * @param <K> type of map keys
 * @param <V> type of map values
 */
public static abstract class DiffableValueSerializer<K, V extends Diffable<V>> implements ValueSerializer<K, V> {
    private static final DiffableValueSerializer WRITE_ONLY_INSTANCE = new DiffableValueSerializer() {
        @Override
        public Object read(StreamInput in, Object key) throws IOException {
            throw new UnsupportedOperationException();
        }

        @Override
        public Diff<Object> readDiff(StreamInput in, Object key) throws IOException {
            throw new UnsupportedOperationException();
        }
    };

    private static <K, V extends Diffable<V>> DiffableValueSerializer<K, V> getWriteOnlyInstance() {
        return WRITE_ONLY_INSTANCE;
    }

    @Override
    public boolean supportsDiffableValues() {
        return true;
    }

    @Override
    public Diff<V> diff(V value, V beforePart) {
        return value.diff(beforePart);
    }

    @Override
    public void write(V value, StreamOutput out) throws IOException {
        value.writeTo(out);
    }

    public void writeDiff(Diff<V> value, StreamOutput out) throws IOException {
        value.writeTo(out);
    }
}

/**
 * Serializer for non-diffable map values
 *
 * @param <K> type of map keys
 * @param <V> type of map values
 */
public static abstract class NonDiffableValueSerializer<K, V> implements ValueSerializer<K, V> {
    @Override
    public boolean supportsDiffableValues() {
        return false;
    }

    @Override
    public Diff<V> diff(V value, V beforePart) {
        throw new UnsupportedOperationException();
    }

    @Override
    public void writeDiff(Diff<V> value, StreamOutput out) throws IOException {
        throw new UnsupportedOperationException();
    }

    @Override
    public Diff<V> readDiff(StreamInput in, K key) throws IOException {
        throw new UnsupportedOperationException();
    }
}

/**
 * Implementation of the ValueSerializer that uses a prototype object for reading operations
 *
 * Note: this implementation is ignoring the key.
 */
public static class DiffablePrototypeValueReader<K, V extends Diffable<V>> extends DiffableValueSerializer<K, V> {
    private final V proto;

    public DiffablePrototypeValueReader(V proto) {
        this.proto = proto;
    }

    @Override
    public V read(StreamInput in, K key) throws IOException {
        return proto.readFrom(in);
    }

    @Override
    public Diff<V> readDiff(StreamInput in, K key) throws IOException {
        return proto.readDiffFrom(in);
    }
}
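
The writing side of a diffable map can get away with the write-only serializer, while the reading side needs a prototype so values and value diffs can be deserialized. A sketch of that split, borrowing the mappings map from IndexMetaData further down (the before/after metadata instances and the out/in streams are assumed):

    // writer: the three-argument diff() uses DiffableValueSerializer.getWriteOnlyInstance() internally
    Diff<ImmutableOpenMap<String, MappingMetaData>> mappingsDiff =
            DiffableUtils.diff(before.getMappings(), after.getMappings(), DiffableUtils.getStringKeySerializer());
    mappingsDiff.writeTo(out);

    // reader: a prototype-backed serializer (DiffablePrototypeValueReader) is built from MappingMetaData.PROTO
    Diff<ImmutableOpenMap<String, MappingMetaData>> readBack =
            DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);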

/**
 * Implementation of ValueSerializer that serializes immutable sets
 *
 * @param <K> type of map key
 */
public static class StringSetValueSerializer<K> extends NonDiffableValueSerializer<K, Set<String>> {
    private static final StringSetValueSerializer INSTANCE = new StringSetValueSerializer();

    public static <K> StringSetValueSerializer<K> getInstance() {
        return INSTANCE;
    }

    @Override
    public void write(Set<String> value, StreamOutput out) throws IOException {
        out.writeStringArray(value.toArray(new String[value.size()]));
    }

    @Override
    public Set<String> read(StreamInput in, K key) throws IOException {
        return Collections.unmodifiableSet(new HashSet<>(Arrays.asList(in.readStringArray())));
    }
}
}
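
As a rough end-to-end sketch (sample data assumed), the combination used later in this commit for active allocation ids — vint shard-id keys plus string-set values — can be exercised like this:

    ImmutableOpenIntMap.Builder<Set<String>> beforeBuilder = ImmutableOpenIntMap.builder();
    beforeBuilder.put(0, Collections.singleton("alloc_a"));
    ImmutableOpenIntMap<Set<String>> before = beforeBuilder.build();

    ImmutableOpenIntMap.Builder<Set<String>> afterBuilder = ImmutableOpenIntMap.builder();
    afterBuilder.put(0, Collections.singleton("alloc_b"));   // shard 0 changed -> upsert
    afterBuilder.put(1, Collections.singleton("alloc_c"));   // shard 1 added   -> upsert
    ImmutableOpenIntMap<Set<String>> after = afterBuilder.build();

    Diff<ImmutableOpenIntMap<Set<String>>> diff = DiffableUtils.diff(before, after,
            DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
    assert diff.apply(before).equals(after);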
|
||||
|
|
|
@ -25,7 +25,6 @@ import org.elasticsearch.cluster.ClusterState;
|
|||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -57,7 +56,7 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
|||
public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) {
|
||||
final DiscoveryNodes nodes = state.nodes();
|
||||
if (nodes.masterNode() == null) {
|
||||
logger.warn("can't send mapping refresh for [{}][{}], no master known.", request.index(), Strings.arrayToCommaDelimitedString(request.types()));
|
||||
logger.warn("can't send mapping refresh for [{}], no master known.", request.index());
|
||||
return;
|
||||
}
|
||||
transportService.sendRequest(nodes.masterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
|
||||
|
@ -67,7 +66,7 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
|||
|
||||
@Override
|
||||
public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception {
|
||||
metaDataMappingService.refreshMapping(request.index(), request.indexUUID(), request.types());
|
||||
metaDataMappingService.refreshMapping(request.index(), request.indexUUID());
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
}
|
||||
|
@ -76,16 +75,14 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
|||
|
||||
private String index;
|
||||
private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
|
||||
private String[] types;
|
||||
private String nodeId;
|
||||
|
||||
public NodeMappingRefreshRequest() {
|
||||
}
|
||||
|
||||
public NodeMappingRefreshRequest(String index, String indexUUID, String[] types, String nodeId) {
|
||||
public NodeMappingRefreshRequest(String index, String indexUUID, String nodeId) {
|
||||
this.index = index;
|
||||
this.indexUUID = indexUUID;
|
||||
this.types = types;
|
||||
this.nodeId = nodeId;
|
||||
}
|
||||
|
||||
|
@ -107,11 +104,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
|||
return indexUUID;
|
||||
}
|
||||
|
||||
|
||||
public String[] types() {
|
||||
return types;
|
||||
}
|
||||
|
||||
public String nodeId() {
|
||||
return nodeId;
|
||||
}
|
||||
|
@ -120,7 +112,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
|||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(index);
|
||||
out.writeStringArray(types);
|
||||
out.writeString(nodeId);
|
||||
out.writeString(indexUUID);
|
||||
}
|
||||
|
@ -129,7 +120,6 @@ public class NodeMappingRefreshAction extends AbstractComponent {
|
|||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
index = in.readString();
|
||||
types = in.readStringArray();
|
||||
nodeId = in.readString();
|
||||
indexUUID = in.readString();
|
||||
}
|
||||
|
|
|
@ -20,9 +20,7 @@
|
|||
package org.elasticsearch.cluster.action.shard;
|
||||
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingService;
|
||||
|
@ -38,15 +36,12 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
|||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.*;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
|
||||
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
|
||||
|
||||
|
@ -63,9 +58,6 @@ public class ShardStateAction extends AbstractComponent {
|
|||
private final AllocationService allocationService;
|
||||
private final RoutingService routingService;
|
||||
|
||||
private final BlockingQueue<ShardRoutingEntry> startedShardsQueue = ConcurrentCollections.newBlockingQueue();
|
||||
private final BlockingQueue<ShardRoutingEntry> failedShardQueue = ConcurrentCollections.newBlockingQueue();
|
||||
|
||||
@Inject
|
||||
public ShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService,
|
||||
AllocationService allocationService, RoutingService routingService) {
|
||||
|
@ -141,104 +133,94 @@ public class ShardStateAction extends AbstractComponent {
|
|||
});
|
||||
}
|
||||
|
||||
private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
|
||||
|
||||
private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
|
||||
logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
|
||||
failedShardQueue.add(shardRoutingEntry);
|
||||
clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", Priority.HIGH, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask(
|
||||
"shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
|
||||
shardRoutingEntry,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH),
|
||||
shardFailedClusterStateHandler,
|
||||
shardFailedClusterStateHandler);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
if (shardRoutingEntry.processed) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<>();
|
||||
failedShardQueue.drainTo(shardRoutingEntries);
|
||||
|
||||
// nothing to process (a previous event has processed it already)
|
||||
if (shardRoutingEntries.isEmpty()) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(shardRoutingEntries.size());
|
||||
|
||||
// mark all entries as processed
|
||||
for (ShardRoutingEntry entry : shardRoutingEntries) {
|
||||
entry.processed = true;
|
||||
shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(entry.shardRouting, entry.message, entry.failure));
|
||||
}
|
||||
|
||||
RoutingAllocation.Result routingResult = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
|
||||
if (!routingResult.changed()) {
|
||||
return currentState;
|
||||
}
|
||||
return ClusterState.builder(currentState).routingResult(routingResult).build();
|
||||
class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
|
||||
@Override
|
||||
public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
|
||||
BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
|
||||
List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
|
||||
for (ShardRoutingEntry task : tasks) {
|
||||
shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
ClusterState maybeUpdatedState = currentState;
|
||||
try {
|
||||
RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
|
||||
if (result.changed()) {
|
||||
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
|
||||
}
|
||||
batchResultBuilder.successes(tasks);
|
||||
} catch (Throwable t) {
|
||||
batchResultBuilder.failures(tasks, t);
|
||||
}
|
||||
return batchResultBuilder.build(maybeUpdatedState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) {
|
||||
logger.trace("unassigned shards after shard failures. scheduling a reroute.");
|
||||
routingService.reroute("unassigned shards after shard failures, scheduling a reroute");
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
}
|
||||
}
|
||||
|
||||
private final ShardStartedClusterStateHandler shardStartedClusterStateHandler =
|
||||
new ShardStartedClusterStateHandler();
|
||||
|
||||
private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
|
||||
logger.debug("received shard started for {}", shardRoutingEntry);
|
||||
// buffer shard started requests, and the state update tasks will simply drain it
|
||||
// this is to optimize the number of "started" events we generate, and batch them
|
||||
// possibly, we can do time based batching as well, but usually, we would want to
|
||||
// process started events as fast as possible, to make shards available
|
||||
startedShardsQueue.add(shardRoutingEntry);
|
||||
|
||||
clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", Priority.URGENT,
|
||||
new ClusterStateUpdateTask() {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
clusterService.submitStateUpdateTask(
|
||||
"shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
|
||||
shardRoutingEntry,
|
||||
ClusterStateTaskConfig.build(Priority.URGENT),
|
||||
shardStartedClusterStateHandler,
|
||||
shardStartedClusterStateHandler);
|
||||
}
|
||||
|
||||
if (shardRoutingEntry.processed) {
|
||||
return currentState;
|
||||
}
|
||||
class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
|
||||
@Override
|
||||
public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
|
||||
BatchResult.Builder<ShardRoutingEntry> builder = BatchResult.builder();
|
||||
List<ShardRouting> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
|
||||
for (ShardRoutingEntry task : tasks) {
|
||||
shardRoutingsToBeApplied.add(task.shardRouting);
|
||||
}
|
||||
ClusterState maybeUpdatedState = currentState;
|
||||
try {
|
||||
RoutingAllocation.Result result =
|
||||
allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied, true);
|
||||
if (result.changed()) {
|
||||
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
|
||||
}
|
||||
builder.successes(tasks);
|
||||
} catch (Throwable t) {
|
||||
builder.failures(tasks, t);
|
||||
}
|
||||
|
||||
List<ShardRoutingEntry> shardRoutingEntries = new ArrayList<>();
|
||||
startedShardsQueue.drainTo(shardRoutingEntries);
|
||||
return builder.build(maybeUpdatedState);
|
||||
}
|
||||
|
||||
// nothing to process (a previous event has processed it already)
|
||||
if (shardRoutingEntries.isEmpty()) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
List<ShardRouting> shardRoutingToBeApplied = new ArrayList<>(shardRoutingEntries.size());
|
||||
|
||||
// mark all entries as processed
|
||||
for (ShardRoutingEntry entry : shardRoutingEntries) {
|
||||
entry.processed = true;
|
||||
shardRoutingToBeApplied.add(entry.shardRouting);
|
||||
}
|
||||
|
||||
if (shardRoutingToBeApplied.isEmpty()) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
RoutingAllocation.Result routingResult = allocationService.applyStartedShards(currentState, shardRoutingToBeApplied, true);
|
||||
if (!routingResult.changed()) {
|
||||
return currentState;
|
||||
}
|
||||
return ClusterState.builder(currentState).routingResult(routingResult).build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
}
|
||||
});
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.error("unexpected failure during [{}]", t, source);
|
||||
}
|
||||
}
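
Both handlers above follow the same batched-execution contract: entries are still submitted one at a time, but the executor receives them as a list and folds them into a single cluster state update. A stripped-down sketch of that contract (the task type, the applyTasks helper and the logger are assumptions):

    class MyTaskHandler implements ClusterStateTaskExecutor<MyTask>, ClusterStateTaskListener {
        @Override
        public BatchResult<MyTask> execute(ClusterState currentState, List<MyTask> tasks) throws Exception {
            BatchResult.Builder<MyTask> builder = BatchResult.builder();
            ClusterState maybeUpdatedState = currentState;
            try {
                maybeUpdatedState = applyTasks(currentState, tasks); // assumed helper folding all queued tasks in
                builder.successes(tasks);
            } catch (Throwable t) {
                builder.failures(tasks, t);
            }
            return builder.build(maybeUpdatedState);
        }

        @Override
        public void onFailure(String source, Throwable t) {
            logger.error("unexpected failure during [{}]", t, source); // assumed logger
        }
    }

    // submission stays per-task; batching happens inside the executor
    MyTaskHandler handler = new MyTaskHandler();
    clusterService.submitStateUpdateTask("my-task [" + task + "]", task,
            ClusterStateTaskConfig.build(Priority.HIGH), handler, handler);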
|
||||
|
||||
private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
|
||||
|
@ -266,8 +248,6 @@ public class ShardStateAction extends AbstractComponent {
|
|||
String message;
|
||||
Throwable failure;
|
||||
|
||||
volatile boolean processed; // state field, no need to serialize
|
||||
|
||||
public ShardRoutingEntry() {
|
||||
}
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import com.carrotsearch.hppc.LongArrayList;
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
import org.elasticsearch.Version;
|
||||
|
@ -32,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
|
|||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.collect.MapBuilder;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
|
@ -60,8 +62,8 @@ import static org.elasticsearch.common.settings.Settings.*;
|
|||
public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuilder<IndexMetaData>, ToXContent {
|
||||
|
||||
public static final IndexMetaData PROTO = IndexMetaData.builder("")
|
||||
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
|
||||
.numberOfShards(1).numberOfReplicas(0).build();
|
||||
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
|
||||
.numberOfShards(1).numberOfReplicas(0).build();
|
||||
|
||||
public interface Custom extends Diffable<Custom>, ToXContent {
|
||||
|
||||
|
@ -168,6 +170,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
|
||||
public static final String INDEX_UUID_NA_VALUE = "_na_";
|
||||
|
||||
public static final String KEY_ACTIVE_ALLOCATIONS = "active_allocations";
|
||||
static final String KEY_VERSION = "version";
|
||||
static final String KEY_SETTINGS = "settings";
|
||||
static final String KEY_STATE = "state";
|
||||
static final String KEY_MAPPINGS = "mappings";
|
||||
static final String KEY_ALIASES = "aliases";
|
||||
static final String KEY_PRIMARY_TERMS = "primary_terms";
|
||||
|
||||
private final int numberOfShards;
|
||||
private final int numberOfReplicas;
|
||||
|
||||
|
@ -185,6 +195,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
|
||||
private final ImmutableOpenMap<String, Custom> customs;
|
||||
|
||||
private final ImmutableOpenIntMap<Set<String>> activeAllocationIds;
|
||||
|
||||
private transient final int totalNumberOfShards;
|
||||
|
||||
private final DiscoveryNodeFilters requireFilters;
|
||||
|
@ -195,67 +207,31 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
private final Version indexUpgradedVersion;
|
||||
private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
|
||||
|
||||
private IndexMetaData(String index, long version, long[] primaryTerms, State state, Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs) {
|
||||
Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
|
||||
if (maybeNumberOfShards == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
|
||||
}
|
||||
int numberOfShards = maybeNumberOfShards;
|
||||
if (numberOfShards <= 0) {
|
||||
throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
|
||||
}
|
||||
private IndexMetaData(String index, long version, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings,
|
||||
ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
|
||||
ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> activeAllocationIds,
|
||||
DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
|
||||
Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion) {
|
||||
|
||||
Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
|
||||
if (maybeNumberOfReplicas == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
|
||||
}
|
||||
int numberOfReplicas = maybeNumberOfReplicas;
|
||||
if (numberOfReplicas < 0) {
|
||||
throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
|
||||
}
|
||||
this.settings = settings;
|
||||
this.index = index;
|
||||
this.version = version;
|
||||
this.primaryTerms = primaryTerms;
|
||||
assert primaryTerms.length == numberOfShards;
|
||||
this.state = state;
|
||||
this.mappings = mappings;
|
||||
this.customs = customs;
|
||||
this.numberOfShards = numberOfShards;
|
||||
this.numberOfReplicas = numberOfReplicas;
|
||||
this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1);
|
||||
this.settings = settings;
|
||||
this.mappings = mappings;
|
||||
this.customs = customs;
|
||||
this.aliases = aliases;
|
||||
|
||||
Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
|
||||
if (requireMap.isEmpty()) {
|
||||
requireFilters = null;
|
||||
} else {
|
||||
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
|
||||
}
|
||||
Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
|
||||
if (includeMap.isEmpty()) {
|
||||
includeFilters = null;
|
||||
} else {
|
||||
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
|
||||
}
|
||||
Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
|
||||
if (excludeMap.isEmpty()) {
|
||||
excludeFilters = null;
|
||||
} else {
|
||||
excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
|
||||
}
|
||||
indexCreatedVersion = Version.indexCreated(settings);
|
||||
indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
|
||||
String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
|
||||
if (stringLuceneVersion != null) {
|
||||
try {
|
||||
this.minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
|
||||
} catch (ParseException ex) {
|
||||
throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE + "] setting", ex);
|
||||
}
|
||||
} else {
|
||||
this.minimumCompatibleLuceneVersion = null;
|
||||
}
|
||||
this.activeAllocationIds = activeAllocationIds;
|
||||
this.requireFilters = requireFilters;
|
||||
this.includeFilters = includeFilters;
|
||||
this.excludeFilters = excludeFilters;
|
||||
this.indexCreatedVersion = indexCreatedVersion;
|
||||
this.indexUpgradedVersion = indexUpgradedVersion;
|
||||
this.minimumCompatibleLuceneVersion = minimumCompatibleLuceneVersion;
|
||||
}
|
||||
|
||||
public String getIndex() {
|
||||
|
@ -377,6 +353,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
return (T) customs.get(type);
|
||||
}
|
||||
|
||||
public ImmutableOpenIntMap<Set<String>> getActiveAllocationIds() {
|
||||
return activeAllocationIds;
|
||||
}
|
||||
|
||||
public Set<String> activeAllocationIds(int shardId) {
|
||||
assert shardId >= 0 && shardId < numberOfShards;
|
||||
return activeAllocationIds.get(shardId);
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public DiscoveryNodeFilters requireFilters() {
|
||||
return requireFilters;
|
||||
|
@ -429,6 +414,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
if (Arrays.equals(primaryTerms, that.primaryTerms) == false) {
|
||||
return false;
|
||||
}
|
||||
if (!activeAllocationIds.equals(that.activeAllocationIds)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -442,6 +430,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
result = 31 * result + mappings.hashCode();
|
||||
result = 31 * result + customs.hashCode();
|
||||
result = 31 * result + Arrays.hashCode(primaryTerms);
|
||||
result = 31 * result + activeAllocationIds.hashCode();
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -476,7 +465,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
private final Settings settings;
|
||||
private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
|
||||
private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
|
||||
private Diff<ImmutableOpenMap<String, Custom>> customs;
|
||||
private final Diff<ImmutableOpenMap<String, Custom>> customs;
|
||||
private final Diff<ImmutableOpenIntMap<Set<String>>> activeAllocationIds;
|
||||
|
||||
public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) {
|
||||
index = after.index;
|
||||
|
@ -484,9 +474,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
state = after.state;
|
||||
settings = after.settings;
|
||||
primaryTerms = after.primaryTerms;
|
||||
mappings = DiffableUtils.diff(before.mappings, after.mappings);
|
||||
aliases = DiffableUtils.diff(before.aliases, after.aliases);
|
||||
customs = DiffableUtils.diff(before.customs, after.customs);
|
||||
mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
|
||||
aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
|
||||
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
|
||||
activeAllocationIds = DiffableUtils.diff(before.activeAllocationIds, after.activeAllocationIds,
|
||||
DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
|
||||
}
|
||||
|
||||
public IndexMetaDataDiff(StreamInput in) throws IOException {
|
||||
|
@ -495,19 +487,22 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
state = State.fromId(in.readByte());
|
||||
settings = Settings.readSettingsFromStream(in);
|
||||
primaryTerms = in.readVLongArray();
|
||||
mappings = DiffableUtils.readImmutableOpenMapDiff(in, MappingMetaData.PROTO);
|
||||
aliases = DiffableUtils.readImmutableOpenMapDiff(in, AliasMetaData.PROTO);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, new DiffableUtils.KeyedReader<Custom>() {
|
||||
@Override
|
||||
public Custom readFrom(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readFrom(in);
|
||||
}
|
||||
mappings = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), MappingMetaData.PROTO);
|
||||
aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData.PROTO);
|
||||
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
|
||||
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
|
||||
@Override
|
||||
public Custom read(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readDiffFrom(in);
|
||||
}
|
||||
});
|
||||
@Override
|
||||
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
|
||||
return lookupPrototypeSafe(key).readDiffFrom(in);
|
||||
}
|
||||
});
|
||||
activeAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
|
||||
DiffableUtils.StringSetValueSerializer.getInstance());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -520,6 +515,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
mappings.writeTo(out);
|
||||
aliases.writeTo(out);
|
||||
customs.writeTo(out);
|
||||
activeAllocationIds.writeTo(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -532,6 +528,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
builder.mappings.putAll(mappings.apply(part.mappings));
|
||||
builder.aliases.putAll(aliases.apply(part.aliases));
|
||||
builder.customs.putAll(customs.apply(part.customs));
|
||||
builder.activeAllocationIds.putAll(activeAllocationIds.apply(part.activeAllocationIds));
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
|
@ -559,6 +556,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
|
||||
builder.putCustom(type, customIndexMetaData);
|
||||
}
|
||||
int activeAllocationIdsSize = in.readVInt();
|
||||
for (int i = 0; i < activeAllocationIdsSize; i++) {
|
||||
int key = in.readVInt();
|
||||
Set<String> allocationIds = DiffableUtils.StringSetValueSerializer.getInstance().read(in, key);
|
||||
builder.putActiveAllocationIds(key, allocationIds);
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
|
@ -582,6 +585,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
out.writeString(cursor.key);
|
||||
cursor.value.writeTo(out);
|
||||
}
|
||||
out.writeVInt(activeAllocationIds.size());
|
||||
for (IntObjectCursor<Set<String>> cursor : activeAllocationIds) {
|
||||
out.writeVInt(cursor.key);
|
||||
DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.value, out);
|
||||
}
|
||||
}
|
||||
|
||||
public static Builder builder(String index) {
|
||||
|
@ -602,12 +610,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
|
||||
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
|
||||
private final ImmutableOpenMap.Builder<String, Custom> customs;
|
||||
private final ImmutableOpenIntMap.Builder<Set<String>> activeAllocationIds;
|
||||
|
||||
public Builder(String index) {
|
||||
this.index = index;
|
||||
this.mappings = ImmutableOpenMap.builder();
|
||||
this.aliases = ImmutableOpenMap.builder();
|
||||
this.customs = ImmutableOpenMap.builder();
|
||||
this.activeAllocationIds = ImmutableOpenIntMap.builder();
|
||||
}
|
||||
|
||||
public Builder(IndexMetaData indexMetaData) {
|
||||
|
@ -619,6 +629,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
|
||||
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
|
||||
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
|
||||
this.activeAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.activeAllocationIds);
|
||||
}
|
||||
|
||||
public String index() {
|
||||
|
@ -726,6 +737,15 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
return this.customs.get(type);
|
||||
}
|
||||
|
||||
public Builder putActiveAllocationIds(int shardId, Set<String> allocationIds) {
|
||||
activeAllocationIds.put(shardId, new HashSet<>(allocationIds));
|
||||
return this;
|
||||
}
|
||||
|
||||
public Set<String> getActiveAllocationIds(int shardId) {
|
||||
return activeAllocationIds.get(shardId);
|
||||
}
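
A small sketch of the new builder hook (index name and allocation ids assumed), mirroring how a caller might seed the active allocation ids for shard 0:

    IndexMetaData indexMetaData = IndexMetaData.builder("my_index")
            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
            .numberOfShards(1)
            .numberOfReplicas(0)
            .putActiveAllocationIds(0, new HashSet<>(Arrays.asList("alloc_1", "alloc_2")))
            .build();
    assert indexMetaData.activeAllocationIds(0).equals(new HashSet<>(Arrays.asList("alloc_1", "alloc_2")));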
|
||||
|
||||
public long version() {
|
||||
return this.version;
|
||||
}
|
||||
|
@ -783,40 +803,96 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
}
|
||||
}
|
||||
|
||||
if (primaryTerms == null) {
|
||||
initializePrimaryTerms();
|
||||
} else if (primaryTerms.length != numberOfShards()) {
|
||||
throw new IllegalStateException("primaryTerms length is [" + primaryTerms.length
|
||||
+ "] but should be equal to number of shards [" + numberOfShards() + "]");
|
||||
Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
|
||||
if (maybeNumberOfShards == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
|
||||
}
|
||||
int numberOfShards = maybeNumberOfShards;
|
||||
if (numberOfShards <= 0) {
|
||||
throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
|
||||
}
|
||||
|
||||
return new IndexMetaData(index, version, primaryTerms, state, tmpSettings, mappings.build(), tmpAliases.build(), customs.build());
|
||||
}
|
||||
Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
|
||||
if (maybeNumberOfReplicas == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
|
||||
}
|
||||
int numberOfReplicas = maybeNumberOfReplicas;
|
||||
if (numberOfReplicas < 0) {
|
||||
throw new IllegalArgumentException("must specify non-negative number of shards for index [" + index + "]");
|
||||
}
|
||||
|
||||
static final class Fields {
|
||||
static final XContentBuilderString VERSION = new XContentBuilderString("version");
|
||||
static final XContentBuilderString SETTINGS = new XContentBuilderString("settings");
|
||||
static final XContentBuilderString STATE = new XContentBuilderString("state");
|
||||
static final XContentBuilderString MAPPINGS = new XContentBuilderString("mappings");
|
||||
static final XContentBuilderString ALIASES = new XContentBuilderString("aliases");
|
||||
static final XContentBuilderString PRIMARY_TERMS = new XContentBuilderString("primary_terms");
|
||||
// fill missing slots in activeAllocationIds with empty set if needed and make all entries immutable
|
||||
ImmutableOpenIntMap.Builder<Set<String>> filledActiveAllocationIds = ImmutableOpenIntMap.builder();
|
||||
for (int i = 0; i < numberOfShards; i++) {
|
||||
if (activeAllocationIds.containsKey(i)) {
|
||||
filledActiveAllocationIds.put(i, Collections.unmodifiableSet(new HashSet<>(activeAllocationIds.get(i))));
|
||||
} else {
|
||||
filledActiveAllocationIds.put(i, Collections.emptySet());
|
||||
}
|
||||
}
|
||||
|
||||
Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
|
||||
final DiscoveryNodeFilters requireFilters;
|
||||
if (requireMap.isEmpty()) {
|
||||
requireFilters = null;
|
||||
} else {
|
||||
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
|
||||
}
|
||||
Map<String, String> includeMap = settings.getByPrefix("index.routing.allocation.include.").getAsMap();
|
||||
final DiscoveryNodeFilters includeFilters;
|
||||
if (includeMap.isEmpty()) {
|
||||
includeFilters = null;
|
||||
} else {
|
||||
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
|
||||
}
|
||||
Map<String, String> excludeMap = settings.getByPrefix("index.routing.allocation.exclude.").getAsMap();
|
||||
final DiscoveryNodeFilters excludeFilters;
|
||||
if (excludeMap.isEmpty()) {
|
||||
excludeFilters = null;
|
||||
} else {
|
||||
excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
|
||||
}
|
||||
Version indexCreatedVersion = Version.indexCreated(settings);
Version indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion);
String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE);
final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
if (stringLuceneVersion != null) {
try {
minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion);
} catch (ParseException ex) {
throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE + "] setting", ex);
}
} else {
minimumCompatibleLuceneVersion = null;
}

if (primaryTerms == null) {
initializePrimaryTerms();
} else if (primaryTerms.length != numberOfShards) {
throw new IllegalStateException("primaryTerms length is [" + primaryTerms.length
+ "] but should be equal to number of shards [" + numberOfShards() + "]");
}

return new IndexMetaData(index, version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
tmpAliases.build(), customs.build(), filledActiveAllocationIds.build(), requireFilters, includeFilters, excludeFilters,
indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion);
}

public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(indexMetaData.getIndex(), XContentBuilder.FieldCaseConversion.NONE);

builder.field(Fields.VERSION, indexMetaData.getVersion());
builder.field(Fields.STATE, indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH));
builder.field(KEY_VERSION, indexMetaData.getVersion());
builder.field(KEY_STATE, indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH));

boolean binary = params.paramAsBoolean("binary", false);

builder.startObject(Fields.SETTINGS);
builder.startObject(KEY_SETTINGS);
for (Map.Entry<String, String> entry : indexMetaData.getSettings().getAsMap().entrySet()) {
builder.field(entry.getKey(), entry.getValue());
}
builder.endObject();

builder.startArray(Fields.MAPPINGS);
builder.startArray(KEY_MAPPINGS);
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
if (binary) {
builder.value(cursor.value.source().compressed());

@@ -836,24 +912,29 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
builder.endObject();
}

builder.startObject(Fields.ALIASES);
builder.startObject(KEY_ALIASES);
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
AliasMetaData.Builder.toXContent(cursor.value, builder, params);
}
builder.endObject();

builder.startArray(Fields.PRIMARY_TERMS);
builder.startArray(KEY_PRIMARY_TERMS);
for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
builder.value(indexMetaData.primaryTerm(i));
}
builder.endArray();

builder.startObject(KEY_ACTIVE_ALLOCATIONS);
for (IntObjectCursor<Set<String>> cursor : indexMetaData.activeAllocationIds) {
builder.startArray(String.valueOf(cursor.key));
for (String allocationId : cursor.value) {
builder.value(allocationId);
}
builder.endArray();
}
builder.endObject();
}

// TODO move it somewhere where it will be useful for other code?
private static boolean fieldEquals(XContentBuilderString field, String currentFieldName) {
return field.underscore().getValue().equals(currentFieldName);
builder.endObject();
}

public static IndexMetaData fromXContent(XContentParser parser) throws IOException {

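// For reference, toXContent above emits (and the parser below consumes) an object of roughly this
// shape per index; the exact field names come from the KEY_* constants, assumed here to match the
// lowercased names used elsewhere in this class, and the concrete values are illustrative only:
//
//   "my-index" : {
//     "version" : 3, "state" : "open",
//     "settings" : { ... }, "mappings" : [ ... ], "aliases" : { ... },
//     "primary_terms" : [ 1, 1 ],
//     "active_allocations" : { "0" : [ "allocId1", "allocId2" ], "1" : [ ] }
//   }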
@ -871,9 +952,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if (fieldEquals(Fields.SETTINGS, currentFieldName)) {
|
||||
if (KEY_SETTINGS.equals(currentFieldName)) {
|
||||
builder.settings(Settings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())));
|
||||
} else if (fieldEquals(Fields.MAPPINGS, currentFieldName)) {
|
||||
} else if (KEY_MAPPINGS.equals(currentFieldName)) {
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
|
@ -883,10 +964,25 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
builder.putMapping(new MappingMetaData(mappingType, mappingSource));
|
||||
}
|
||||
}
|
||||
} else if (fieldEquals(Fields.ALIASES, currentFieldName)) {
|
||||
} else if (KEY_ALIASES.equals(currentFieldName)) {
|
||||
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
|
||||
builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
|
||||
}
|
||||
} else if (KEY_ACTIVE_ALLOCATIONS.equals(currentFieldName)) {
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.START_ARRAY) {
|
||||
String shardId = currentFieldName;
|
||||
Set<String> allocationIds = new HashSet<>();
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
allocationIds.add(parser.text());
|
||||
}
|
||||
}
|
||||
builder.putActiveAllocationIds(Integer.valueOf(shardId), allocationIds);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// check if its a custom index metadata
|
||||
Custom proto = lookupPrototype(currentFieldName);
|
||||
|
@ -899,7 +995,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
}
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_ARRAY) {
|
||||
if (fieldEquals(Fields.MAPPINGS, currentFieldName)) {
|
||||
if (KEY_MAPPINGS.equals(currentFieldName)) {
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
|
||||
if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
|
||||
builder.putMapping(new MappingMetaData(new CompressedXContent(parser.binaryValue())));
|
||||
|
@ -911,21 +1007,21 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
}
|
||||
}
|
||||
}
|
||||
} else if (fieldEquals(Fields.PRIMARY_TERMS, currentFieldName)) {
|
||||
} else if (KEY_PRIMARY_TERMS.equals(currentFieldName)) {
|
||||
LongArrayList list = new LongArrayList();
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
|
||||
if (token == XContentParser.Token.VALUE_NUMBER) {
|
||||
list.add(parser.longValue());
|
||||
} else {
|
||||
throw new IllegalStateException("found a non-numeric value under [" + Fields.PRIMARY_TERMS.underscore() + "]");
|
||||
throw new IllegalStateException("found a non-numeric value under [" + KEY_PRIMARY_TERMS + "]");
|
||||
}
|
||||
}
|
||||
builder.primaryTerms(list.toArray());
|
||||
}
|
||||
} else if (token.isValue()) {
|
||||
if (fieldEquals(Fields.STATE, currentFieldName)) {
|
||||
if (KEY_STATE.equals(currentFieldName)) {
|
||||
builder.state(State.fromString(parser.text()));
|
||||
} else if (fieldEquals(Fields.VERSION, currentFieldName)) {
|
||||
} else if (KEY_VERSION.equals(currentFieldName)) {
|
||||
builder.version(parser.longValue());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,7 +27,6 @@ import org.apache.lucene.util.CollectionUtil;
|
|||
import org.elasticsearch.cluster.Diff;
|
||||
import org.elasticsearch.cluster.Diffable;
|
||||
import org.elasticsearch.cluster.DiffableUtils;
|
||||
import org.elasticsearch.cluster.DiffableUtils.KeyedReader;
|
||||
import org.elasticsearch.cluster.InternalClusterInfoService;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
|
@ -41,6 +40,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.loader.SettingsLoader;
|
||||
|
@ -54,7 +54,6 @@ import org.elasticsearch.discovery.DiscoverySettings;
|
|||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.store.IndexStoreConfig;
|
||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.indices.store.IndicesStore;
|
||||
import org.elasticsearch.indices.ttl.IndicesTTLService;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
|
@@ -640,9 +639,9 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
version = after.version;
transientSettings = after.transientSettings;
persistentSettings = after.persistentSettings;
indices = DiffableUtils.diff(before.indices, after.indices);
templates = DiffableUtils.diff(before.templates, after.templates);
customs = DiffableUtils.diff(before.customs, after.customs);
indices = DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer());
templates = DiffableUtils.diff(before.templates, after.templates, DiffableUtils.getStringKeySerializer());
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
}

public MetaDataDiff(StreamInput in) throws IOException {

@@ -650,16 +649,17 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
version = in.readLong();
transientSettings = Settings.readSettingsFromStream(in);
persistentSettings = Settings.readSettingsFromStream(in);
indices = DiffableUtils.readImmutableOpenMapDiff(in, IndexMetaData.PROTO);
templates = DiffableUtils.readImmutableOpenMapDiff(in, IndexTemplateMetaData.PROTO);
customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() {
indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexMetaData.PROTO);
templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexTemplateMetaData.PROTO);
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
public Custom readFrom(StreamInput in, String key) throws IOException {
public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in);
}

@Override
public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException {
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
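// A minimal usage sketch of the keyed-diff helpers used above (API shape assumed from the calls
// in this constructor, not verified against DiffableUtils itself):
//
//   Diff<ImmutableOpenMap<String, IndexMetaData>> diff =
//           DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer());
//   ImmutableOpenMap<String, IndexMetaData> rebuilt = diff.apply(before.indices);
//
// The key serializer tells DiffableUtils how to write map keys to the stream, so maps keyed by
// something other than strings can reuse the same machinery.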
@@ -1029,12 +1029,18 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr

for (ObjectObjectCursor<String, AliasMetaData> aliasCursor : indexMetaData.getAliases()) {
AliasMetaData aliasMetaData = aliasCursor.value;
AliasOrIndex.Alias aliasOrIndex = (AliasOrIndex.Alias) aliasAndIndexLookup.get(aliasMetaData.getAlias());
AliasOrIndex aliasOrIndex = aliasAndIndexLookup.get(aliasMetaData.getAlias());
if (aliasOrIndex == null) {
aliasOrIndex = new AliasOrIndex.Alias(aliasMetaData, indexMetaData);
aliasAndIndexLookup.put(aliasMetaData.getAlias(), aliasOrIndex);
} else if (aliasOrIndex instanceof AliasOrIndex.Alias) {
AliasOrIndex.Alias alias = (AliasOrIndex.Alias) aliasOrIndex;
alias.addIndex(indexMetaData);
} else if (aliasOrIndex instanceof AliasOrIndex.Index) {
AliasOrIndex.Index index = (AliasOrIndex.Index) aliasOrIndex;
throw new IllegalStateException("index and alias names need to be unique, but alias [" + aliasMetaData.getAlias() + "] and index [" + index.getIndex().getIndex() + "] have the same name");
} else {
aliasOrIndex.addIndex(indexMetaData);
throw new IllegalStateException("unexpected alias [" + aliasMetaData.getAlias() + "][" + aliasOrIndex + "]");
}
}
}
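// aliasAndIndexLookup ends up mapping every alias name and index name to either an
// AliasOrIndex.Alias (an alias grouping one or more indices) or an AliasOrIndex.Index;
// the IllegalStateException above enforces that alias and index names never collide.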
@@ -170,12 +170,12 @@ public class MetaDataCreateIndexService extends AbstractComponent {
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
request.settings(updatedSettingsBuilder.build());

clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {

@Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
return new ClusterStateUpdateResponse(acknowledged);
}
clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]",
new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
@Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
return new ClusterStateUpdateResponse(acknowledged);
}

@Override
public ClusterState execute(ClusterState currentState) throws Exception {
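// Sketch of the acked-task pattern used above (constructor shape taken from this call; the
// priority travels with the task itself rather than as a separate submit argument, and the
// source string "my-source" below is only a placeholder):
//
//   clusterService.submitStateUpdateTask("my-source",
//           new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
//               @Override
//               protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
//                   return new ClusterStateUpdateResponse(acknowledged);
//               }
//               @Override
//               public ClusterState execute(ClusterState currentState) throws Exception {
//                   return currentState; // no-op for illustration
//               }
//           });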
@ -299,7 +299,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
|||
// Set up everything, now locally create the index to see that things are ok, and apply
|
||||
final IndexMetaData tmpImd = IndexMetaData.builder(request.index()).settings(actualIndexSettings).build();
|
||||
// create the index here (on the master) to validate it can be created, as well as adding the mapping
|
||||
indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.EMPTY_LIST);
|
||||
indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
|
||||
indexCreated = true;
|
||||
// now add the mappings
|
||||
IndexService indexService = indicesService.indexServiceSafe(request.index());
|
||||
|
|
|
@ -39,7 +39,6 @@ import org.elasticsearch.threadpool.ThreadPool;
|
|||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Locale;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
@ -71,7 +70,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
|
|||
Collection<String> indices = Arrays.asList(request.indices);
|
||||
final DeleteIndexListener listener = new DeleteIndexListener(userListener);
|
||||
|
||||
clusterService.submitStateUpdateTask("delete-index " + indices, Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
|
|
|
@ -62,7 +62,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
|
|||
}
|
||||
|
||||
public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
clusterService.submitStateUpdateTask("index-aliases", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("index-aliases", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
|
@ -99,7 +99,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
|
|||
if (indexService == null) {
|
||||
// temporarily create the index and add mappings so we can parse the filter
|
||||
try {
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
|
||||
}
|
||||
|
|
|
@ -76,7 +76,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
|
|||
}
|
||||
|
||||
final String indicesAsString = Arrays.toString(request.indices());
|
||||
clusterService.submitStateUpdateTask("close-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("close-indices " + indicesAsString, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
|
@ -140,7 +140,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
|
|||
}
|
||||
|
||||
final String indicesAsString = Arrays.toString(request.indices());
|
||||
clusterService.submitStateUpdateTask("open-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("open-indices " + indicesAsString, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
|
|
|
@ -56,7 +56,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
|
|||
}
|
||||
|
||||
public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {
|
||||
clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
|
@ -143,7 +143,8 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
|
|||
}
|
||||
final IndexTemplateMetaData template = templateBuilder.build();
|
||||
|
||||
clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]",
|
||||
new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
|
@ -216,6 +217,9 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
|
|||
for (Alias alias : request.aliases) {
|
||||
//we validate the alias only partially, as we don't know yet to which index it'll get applied to
|
||||
aliasValidator.validateAliasStandalone(alias);
|
||||
if (request.template.equals(alias.name())) {
|
||||
throw new IllegalArgumentException("Alias [" + alias.name() + "] cannot be the same as the template pattern [" + request.template + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -218,8 +218,8 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
|||
try {
|
||||
// We cannot instantiate real analysis server at this point because the node might not have
|
||||
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it
|
||||
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.EMPTY_LIST);
|
||||
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.EMPTY_MAP);
|
||||
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.emptyList());
|
||||
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
|
||||
|
||||
try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
|
||||
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry)) {
|
||||
|
@ -231,7 +231,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
|||
}
|
||||
} catch (Exception ex) {
|
||||
// Wrap the inner exception so we have the index name in the exception message
|
||||
throw new IllegalStateException("unable to upgrade the mappings for the index [" + indexMetaData.getIndex() + "], reason: [" + ex.getMessage() + "]", ex);
|
||||
throw new IllegalStateException("unable to upgrade the mappings for the index [" + indexMetaData.getIndex() + "]", ex);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -256,7 +256,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
|||
};
|
||||
|
||||
public FakeAnalysisService(IndexSettings indexSettings) {
|
||||
super(indexSettings, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
|
||||
super(indexSettings, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -22,28 +22,27 @@ package org.elasticsearch.cluster.metadata;
|
|||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.NodeServicesProvider;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.MergeMappingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.InvalidTypeNameException;
|
||||
import org.elasticsearch.percolator.PercolatorService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
/**
|
||||
* Service responsible for submitting mapping changes
|
||||
|
@ -53,13 +52,11 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
private final ClusterService clusterService;
|
||||
private final IndicesService indicesService;
|
||||
|
||||
// the mutex protect all the refreshOrUpdate variables!
|
||||
private final Object refreshOrUpdateMutex = new Object();
|
||||
private final List<MappingTask> refreshOrUpdateQueue = new ArrayList<>();
|
||||
private long refreshOrUpdateInsertOrder;
|
||||
private long refreshOrUpdateProcessedInsertOrder;
|
||||
final ClusterStateTaskExecutor<RefreshTask> refreshExecutor = new RefreshTaskExecutor();
|
||||
final ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> putMappingExecutor = new PutMappingExecutor();
|
||||
private final NodeServicesProvider nodeServicesProvider;
|
||||
|
||||
|
||||
@Inject
|
||||
public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) {
|
||||
super(settings);
|
||||
|
@@ -68,89 +65,44 @@ public class MetaDataMappingService extends AbstractComponent {
this.nodeServicesProvider = nodeServicesProvider;
}

static class MappingTask {
static class RefreshTask {
final String index;
final String indexUUID;

MappingTask(String index, final String indexUUID) {
RefreshTask(String index, final String indexUUID) {
this.index = index;
this.indexUUID = indexUUID;
}
}

static class RefreshTask extends MappingTask {
final String[] types;

RefreshTask(String index, final String indexUUID, String[] types) {
super(index, indexUUID);
this.types = types;
}
}

static class UpdateTask extends MappingTask {
final String type;
final CompressedXContent mappingSource;
final String nodeId; // null fr unknown
final ActionListener<ClusterStateUpdateResponse> listener;

UpdateTask(String index, String indexUUID, String type, CompressedXContent mappingSource, String nodeId, ActionListener<ClusterStateUpdateResponse> listener) {
super(index, indexUUID);
this.type = type;
this.mappingSource = mappingSource;
this.nodeId = nodeId;
this.listener = listener;
class RefreshTaskExecutor implements ClusterStateTaskExecutor<RefreshTask> {
@Override
public BatchResult<RefreshTask> execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception {
ClusterState newClusterState = executeRefresh(currentState, tasks);
return BatchResult.<RefreshTask>builder().successes(tasks).build(newClusterState);
}
}

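// Sketch of the batching contract shown above (types and calls mirror RefreshTaskExecutor in this
// class; this is an illustrative no-op, not part of the change): a ClusterStateTaskExecutor
// receives every pending task of its kind in one batch, produces one new cluster state, and
// reports a per-task outcome.
//
//   class NoopRefreshExecutor implements ClusterStateTaskExecutor<RefreshTask> {
//       @Override
//       public BatchResult<RefreshTask> execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception {
//           // accept every task without changing the state
//           return BatchResult.<RefreshTask>builder().successes(tasks).build(currentState);
//       }
//   }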
/**
|
||||
* Batch method to apply all the queued refresh or update operations. The idea is to try and batch as much
|
||||
* Batch method to apply all the queued refresh operations. The idea is to try and batch as much
|
||||
* as possible so we won't create the same index all the time for example for the updates on the same mapping
|
||||
* and generate a single cluster change event out of all of those.
|
||||
*/
|
||||
Tuple<ClusterState, List<MappingTask>> executeRefreshOrUpdate(final ClusterState currentState, final long insertionOrder) throws Exception {
|
||||
final List<MappingTask> allTasks = new ArrayList<>();
|
||||
|
||||
synchronized (refreshOrUpdateMutex) {
|
||||
if (refreshOrUpdateQueue.isEmpty()) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
}
|
||||
|
||||
// we already processed this task in a bulk manner in a previous cluster event, simply ignore
|
||||
// it so we will let other tasks get in and processed ones, we will handle the queued ones
|
||||
// later on in a subsequent cluster state event
|
||||
if (insertionOrder < refreshOrUpdateProcessedInsertOrder) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
}
|
||||
|
||||
allTasks.addAll(refreshOrUpdateQueue);
|
||||
refreshOrUpdateQueue.clear();
|
||||
|
||||
refreshOrUpdateProcessedInsertOrder = refreshOrUpdateInsertOrder;
|
||||
}
|
||||
|
||||
if (allTasks.isEmpty()) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
}
|
||||
|
||||
ClusterState executeRefresh(final ClusterState currentState, final List<RefreshTask> allTasks) throws Exception {
|
||||
// break down to tasks per index, so we can optimize the on demand index service creation
|
||||
// to only happen for the duration of a single index processing of its respective events
|
||||
Map<String, List<MappingTask>> tasksPerIndex = new HashMap<>();
|
||||
for (MappingTask task : allTasks) {
|
||||
Map<String, List<RefreshTask>> tasksPerIndex = new HashMap<>();
|
||||
for (RefreshTask task : allTasks) {
|
||||
if (task.index == null) {
|
||||
logger.debug("ignoring a mapping task of type [{}] with a null index.", task);
|
||||
}
|
||||
List<MappingTask> indexTasks = tasksPerIndex.get(task.index);
|
||||
if (indexTasks == null) {
|
||||
indexTasks = new ArrayList<>();
|
||||
tasksPerIndex.put(task.index, indexTasks);
|
||||
}
|
||||
indexTasks.add(task);
|
||||
tasksPerIndex.computeIfAbsent(task.index, k -> new ArrayList<>()).add(task);
|
||||
}
|
||||
|
||||
boolean dirty = false;
|
||||
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
|
||||
|
||||
for (Map.Entry<String, List<MappingTask>> entry : tasksPerIndex.entrySet()) {
|
||||
for (Map.Entry<String, List<RefreshTask>> entry : tasksPerIndex.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
IndexMetaData indexMetaData = mdBuilder.get(index);
|
||||
if (indexMetaData == null) {
|
||||
|
@ -160,14 +112,17 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
}
|
||||
// the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep
|
||||
// the latest (based on order) update mapping one per node
|
||||
List<MappingTask> allIndexTasks = entry.getValue();
|
||||
List<MappingTask> tasks = new ArrayList<>();
|
||||
for (MappingTask task : allIndexTasks) {
|
||||
if (!indexMetaData.isSameUUID(task.indexUUID)) {
|
||||
List<RefreshTask> allIndexTasks = entry.getValue();
|
||||
boolean hasTaskWithRightUUID = false;
|
||||
for (RefreshTask task : allIndexTasks) {
|
||||
if (indexMetaData.isSameUUID(task.indexUUID)) {
|
||||
hasTaskWithRightUUID = true;
|
||||
} else {
|
||||
logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
|
||||
continue;
|
||||
}
|
||||
tasks.add(task);
|
||||
}
|
||||
if (hasTaskWithRightUUID == false) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// construct the actual index if needed, and make sure the relevant mappings are there
|
||||
|
@ -175,28 +130,17 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
// we need to create the index here, and add the current mapping to it, so we can merge
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
|
||||
removeIndex = true;
|
||||
Set<String> typesToIntroduce = new HashSet<>();
|
||||
for (MappingTask task : tasks) {
|
||||
if (task instanceof UpdateTask) {
|
||||
typesToIntroduce.add(((UpdateTask) task).type);
|
||||
} else if (task instanceof RefreshTask) {
|
||||
Collections.addAll(typesToIntroduce, ((RefreshTask) task).types);
|
||||
}
|
||||
}
|
||||
for (String type : typesToIntroduce) {
|
||||
// only add the current relevant mapping (if exists)
|
||||
if (indexMetaData.getMappings().containsKey(type)) {
|
||||
// don't apply the default mapping, it has been applied when the mapping was created
|
||||
indexService.mapperService().merge(type, indexMetaData.getMappings().get(type).source(), false, true);
|
||||
}
|
||||
for (ObjectCursor<MappingMetaData> metaData : indexMetaData.getMappings().values()) {
|
||||
// don't apply the default mapping, it has been applied when the mapping was created
|
||||
indexService.mapperService().merge(metaData.value.type(), metaData.value.source(), false, true);
|
||||
}
|
||||
}
|
||||
|
||||
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
|
||||
try {
|
||||
boolean indexDirty = processIndexMappingTasks(tasks, indexService, builder);
|
||||
boolean indexDirty = refreshIndexMapping(indexService, builder);
|
||||
if (indexDirty) {
|
||||
mdBuilder.put(builder);
|
||||
dirty = true;
|
||||
|
@ -209,81 +153,33 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
}
|
||||
|
||||
if (!dirty) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
return currentState;
|
||||
}
|
||||
return Tuple.tuple(ClusterState.builder(currentState).metaData(mdBuilder).build(), allTasks);
|
||||
return ClusterState.builder(currentState).metaData(mdBuilder).build();
|
||||
}
|
||||
|
||||
private boolean processIndexMappingTasks(List<MappingTask> tasks, IndexService indexService, IndexMetaData.Builder builder) {
|
||||
private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Builder builder) {
|
||||
boolean dirty = false;
|
||||
String index = indexService.index().name();
|
||||
// keep track of what we already refreshed, no need to refresh it again...
|
||||
Set<String> processedRefreshes = new HashSet<>();
|
||||
for (MappingTask task : tasks) {
|
||||
if (task instanceof RefreshTask) {
|
||||
RefreshTask refreshTask = (RefreshTask) task;
|
||||
try {
|
||||
List<String> updatedTypes = new ArrayList<>();
|
||||
for (String type : refreshTask.types) {
|
||||
if (processedRefreshes.contains(type)) {
|
||||
continue;
|
||||
}
|
||||
DocumentMapper mapper = indexService.mapperService().documentMapper(type);
|
||||
if (mapper == null) {
|
||||
continue;
|
||||
}
|
||||
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
|
||||
updatedTypes.add(type);
|
||||
builder.putMapping(new MappingMetaData(mapper));
|
||||
}
|
||||
processedRefreshes.add(type);
|
||||
}
|
||||
|
||||
if (updatedTypes.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
|
||||
dirty = true;
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types);
|
||||
try {
|
||||
List<String> updatedTypes = new ArrayList<>();
|
||||
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
|
||||
final String type = mapper.type();
|
||||
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
|
||||
updatedTypes.add(type);
|
||||
}
|
||||
} else if (task instanceof UpdateTask) {
|
||||
UpdateTask updateTask = (UpdateTask) task;
|
||||
try {
|
||||
String type = updateTask.type;
|
||||
CompressedXContent mappingSource = updateTask.mappingSource;
|
||||
|
||||
MappingMetaData mappingMetaData = builder.mapping(type);
|
||||
if (mappingMetaData != null && mappingMetaData.source().equals(mappingSource)) {
|
||||
logger.debug("[{}] update_mapping [{}] ignoring mapping update task as its source is equal to ours", index, updateTask.type);
|
||||
continue;
|
||||
}
|
||||
|
||||
DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource, false, true);
|
||||
processedRefreshes.add(type);
|
||||
|
||||
// if we end up with the same mapping as the original once, ignore
|
||||
if (mappingMetaData != null && mappingMetaData.source().equals(updatedMapper.mappingSource())) {
|
||||
logger.debug("[{}] update_mapping [{}] ignoring mapping update task as it results in the same source as what we have", index, updateTask.type);
|
||||
continue;
|
||||
}
|
||||
|
||||
// build the updated mapping source
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] update_mapping [{}] (dynamic) with source [{}]", index, type, updatedMapper.mappingSource());
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] update_mapping [{}] (dynamic)", index, type);
|
||||
}
|
||||
|
||||
builder.putMapping(new MappingMetaData(updatedMapper));
|
||||
dirty = true;
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to update-mapping in cluster state, type [{}]", index, updateTask.type);
|
||||
}
|
||||
} else {
|
||||
logger.warn("illegal state, got wrong mapping task type [{}]", task);
|
||||
}
|
||||
|
||||
// if a single type is not up-to-date, re-send everything
|
||||
if (updatedTypes.isEmpty() == false) {
|
||||
logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes);
|
||||
dirty = true;
|
||||
for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
|
||||
builder.putMapping(new MappingMetaData(mapper));
|
||||
}
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to refresh-mapping in cluster state", t, index);
|
||||
}
|
||||
return dirty;
|
||||
}
|
||||
|
@@ -291,198 +187,198 @@ public class MetaDataMappingService extends AbstractComponent {
/**
* Refreshes mappings if they are not the same between original and parsed version
*/
public void refreshMapping(final String index, final String indexUUID, final String... types) {
final long insertOrder;
synchronized (refreshOrUpdateMutex) {
insertOrder = ++refreshOrUpdateInsertOrder;
refreshOrUpdateQueue.add(new RefreshTask(index, indexUUID, types));
}
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", Priority.HIGH, new ClusterStateUpdateTask() {
private volatile List<MappingTask> allTasks;
public void refreshMapping(final String index, final String indexUUID) {
final RefreshTask refreshTask = new RefreshTask(index, indexUUID);
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "]",
refreshTask,
ClusterStateTaskConfig.build(Priority.HIGH),
refreshExecutor,
(source, t) -> logger.warn("failure during [{}]", t, source)
);
}

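// The five-argument submitStateUpdateTask form used above pairs a task instance with a
// ClusterStateTaskConfig (priority and, where needed, a timeout), the batching executor that
// will process it, and a callback, here a lambda that only logs failures.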
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.warn("failure during [{}]", t, source);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
Tuple<ClusterState, List<MappingTask>> tuple = executeRefreshOrUpdate(currentState, insertOrder);
|
||||
this.allTasks = tuple.v2();
|
||||
return tuple.v1();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
if (allTasks == null) {
|
||||
return;
|
||||
class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
|
||||
@Override
|
||||
public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
|
||||
Set<String> indicesToClose = new HashSet<>();
|
||||
BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
|
||||
try {
|
||||
// precreate incoming indices;
|
||||
for (PutMappingClusterStateUpdateRequest request : tasks) {
|
||||
// failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up
|
||||
for (String index : request.indices()) {
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
if (indexMetaData != null && indicesService.hasIndex(index) == false) {
|
||||
// if we don't have the index, we will throw exceptions later;
|
||||
indicesToClose.add(index);
|
||||
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.emptyList());
|
||||
// add mappings for all types, we need them for cross-type validation
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
|
||||
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(), false, request.updateAllTypes());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for (Object task : allTasks) {
|
||||
if (task instanceof UpdateTask) {
|
||||
UpdateTask uTask = (UpdateTask) task;
|
||||
ClusterStateUpdateResponse response = new ClusterStateUpdateResponse(true);
|
||||
uTask.listener.onResponse(response);
|
||||
for (PutMappingClusterStateUpdateRequest request : tasks) {
|
||||
try {
|
||||
currentState = applyRequest(currentState, request);
|
||||
builder.success(request);
|
||||
} catch (Throwable t) {
|
||||
builder.failure(request, t);
|
||||
}
|
||||
}
|
||||
|
||||
return builder.build(currentState);
|
||||
} finally {
|
||||
for (String index : indicesToClose) {
|
||||
indicesService.removeIndex(index, "created for mapping processing");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
|
||||
Map<String, DocumentMapper> newMappers = new HashMap<>();
|
||||
Map<String, DocumentMapper> existingMappers = new HashMap<>();
|
||||
for (String index : request.indices()) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(index);
|
||||
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
|
||||
DocumentMapper newMapper;
|
||||
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
|
||||
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
|
||||
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
|
||||
} else {
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
|
||||
if (existingMapper != null) {
|
||||
// first, simulate
|
||||
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
|
||||
// if we have conflicts, throw an exception
|
||||
if (mergeResult.hasConflicts()) {
|
||||
throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}");
|
||||
}
|
||||
} else {
|
||||
// TODO: can we find a better place for this validation?
|
||||
// The reason this validation is here is that the mapper service doesn't learn about
|
||||
// new types all at once , which can create a false error.
|
||||
|
||||
// For example in MapperService we can't distinguish between a create index api call
|
||||
// and a put mapping api call, so we don't which type did exist before.
|
||||
// Also the order of the mappings may be backwards.
|
||||
if (newMapper.parentFieldMapper().active()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
|
||||
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
|
||||
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
newMappers.put(index, newMapper);
|
||||
if (existingMapper != null) {
|
||||
existingMappers.put(index, existingMapper);
|
||||
}
|
||||
}
|
||||
|
||||
String mappingType = request.type();
|
||||
if (mappingType == null) {
|
||||
mappingType = newMappers.values().iterator().next().type();
|
||||
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
|
||||
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
|
||||
}
|
||||
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
|
||||
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
|
||||
}
|
||||
final Map<String, MappingMetaData> mappings = new HashMap<>();
|
||||
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
// do the actual merge here on the master, and update the mapping source
|
||||
DocumentMapper newMapper = entry.getValue();
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
CompressedXContent existingSource = null;
|
||||
if (existingMappers.containsKey(entry.getKey())) {
|
||||
existingSource = existingMappers.get(entry.getKey()).mappingSource();
|
||||
}
|
||||
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
|
||||
CompressedXContent updatedSource = mergedMapper.mappingSource();
|
||||
|
||||
if (existingSource != null) {
|
||||
if (existingSource.equals(updatedSource)) {
|
||||
// same source, no changes, ignore it
|
||||
} else {
|
||||
// use the merged mapping source
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
|
||||
}
|
||||
|
||||
}
|
||||
} else {
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
if (mappings.isEmpty()) {
|
||||
// no changes, return
|
||||
return currentState;
|
||||
}
|
||||
MetaData.Builder builder = MetaData.builder(currentState.metaData());
|
||||
for (String indexName : request.indices()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
|
||||
if (indexMetaData == null) {
|
||||
throw new IndexNotFoundException(indexName);
|
||||
}
|
||||
MappingMetaData mappingMd = mappings.get(indexName);
|
||||
if (mappingMd != null) {
|
||||
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
|
||||
}
|
||||
}
|
||||
|
||||
return ClusterState.builder(currentState).metaData(builder).build();
|
||||
}
|
||||
}
|
||||
|
||||
public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]",
|
||||
request,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH, request.masterNodeTimeout()),
|
||||
putMappingExecutor,
|
||||
new AckedClusterStateTaskListener() {
|
||||
|
||||
clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(final ClusterState currentState) throws Exception {
|
||||
List<String> indicesToClose = new ArrayList<>();
|
||||
try {
|
||||
for (String index : request.indices()) {
|
||||
if (!currentState.metaData().hasIndex(index)) {
|
||||
throw new IndexNotFoundException(index);
|
||||
}
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
listener.onFailure(t);
|
||||
}
|
||||
|
||||
// pre create indices here and add mappings to them so we can merge the mappings here if needed
|
||||
for (String index : request.indices()) {
|
||||
if (indicesService.hasIndex(index)) {
|
||||
continue;
|
||||
}
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
indicesToClose.add(indexMetaData.getIndex());
|
||||
// make sure to add custom default mapping if exists
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
|
||||
}
|
||||
// only add the current relevant mapping (if exists)
|
||||
if (indexMetaData.getMappings().containsKey(request.type())) {
|
||||
indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
|
||||
}
|
||||
@Override
|
||||
public boolean mustAck(DiscoveryNode discoveryNode) {
|
||||
return true;
|
||||
}
|
||||
|
||||
Map<String, DocumentMapper> newMappers = new HashMap<>();
|
||||
Map<String, DocumentMapper> existingMappers = new HashMap<>();
|
||||
for (String index : request.indices()) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(index);
|
||||
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
|
||||
DocumentMapper newMapper;
|
||||
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
|
||||
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
|
||||
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
|
||||
} else {
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
|
||||
if (existingMapper != null) {
|
||||
// first, simulate
|
||||
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
|
||||
// if we have conflicts, throw an exception
|
||||
if (mergeResult.hasConflicts()) {
|
||||
throw new MergeMappingException(mergeResult.buildConflicts());
|
||||
}
|
||||
} else {
|
||||
// TODO: can we find a better place for this validation?
|
||||
// The reason this validation is here is that the mapper service doesn't learn about
|
||||
// new types all at once , which can create a false error.
|
||||
|
||||
// For example in MapperService we can't distinguish between a create index api call
|
||||
// and a put mapping api call, so we don't which type did exist before.
|
||||
// Also the order of the mappings may be backwards.
|
||||
if (newMapper.parentFieldMapper().active()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
|
||||
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
|
||||
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
newMappers.put(index, newMapper);
|
||||
if (existingMapper != null) {
|
||||
existingMappers.put(index, existingMapper);
|
||||
}
|
||||
@Override
|
||||
public void onAllNodesAcked(@Nullable Throwable t) {
|
||||
listener.onResponse(new ClusterStateUpdateResponse(true));
|
||||
}
|
||||
|
||||
String mappingType = request.type();
|
||||
if (mappingType == null) {
|
||||
mappingType = newMappers.values().iterator().next().type();
|
||||
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
|
||||
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
|
||||
}
|
||||
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
|
||||
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
|
||||
@Override
|
||||
public void onAckTimeout() {
|
||||
listener.onResponse(new ClusterStateUpdateResponse(false));
|
||||
}
|
||||
|
||||
final Map<String, MappingMetaData> mappings = new HashMap<>();
|
||||
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
// do the actual merge here on the master, and update the mapping source
|
||||
DocumentMapper newMapper = entry.getValue();
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
CompressedXContent existingSource = null;
|
||||
if (existingMappers.containsKey(entry.getKey())) {
|
||||
existingSource = existingMappers.get(entry.getKey()).mappingSource();
|
||||
}
|
||||
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
|
||||
CompressedXContent updatedSource = mergedMapper.mappingSource();
|
||||
|
||||
if (existingSource != null) {
|
||||
if (existingSource.equals(updatedSource)) {
|
||||
// same source, no changes, ignore it
|
||||
} else {
|
||||
// use the merged mapping source
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public TimeValue ackTimeout() {
|
||||
return request.ackTimeout();
|
||||
}
|
||||
|
||||
if (mappings.isEmpty()) {
|
||||
// no changes, return
|
||||
return currentState;
|
||||
}
|
||||
|
||||
MetaData.Builder builder = MetaData.builder(currentState.metaData());
|
||||
for (String indexName : request.indices()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
|
||||
if (indexMetaData == null) {
|
||||
throw new IndexNotFoundException(indexName);
|
||||
}
|
||||
MappingMetaData mappingMd = mappings.get(indexName);
|
||||
if (mappingMd != null) {
|
||||
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
|
||||
}
|
||||
}
|
||||
|
||||
return ClusterState.builder(currentState).metaData(builder).build();
|
||||
} finally {
|
||||
for (String index : indicesToClose) {
|
||||
indicesService.removeIndex(index, "created for mapping processing");
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,11 +24,7 @@ import org.elasticsearch.action.ActionListener;
|
|||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
|
@ -44,13 +40,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.settings.IndexDynamicSettings;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.*;
|
||||
|
||||
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
||||
|
||||
|
@@ -219,7 +209,8 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
        }
        final Settings openSettings = updatedSettingsBuilder.build();

        clusterService.submitStateUpdateTask("update-settings", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
        clusterService.submitStateUpdateTask("update-settings",
                new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {

            @Override
            protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {

@@ -334,7 +325,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
    public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {


        clusterService.submitStateUpdateTask("update-index-compatibility-versions", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
        clusterService.submitStateUpdateTask("update-index-compatibility-versions", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {

            @Override
            protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {

@@ -147,7 +147,7 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> i
            return;
        }
        logger.trace("rerouting {}", reason);
        clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", Priority.HIGH, new ClusterStateUpdateTask() {
        clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", new ClusterStateUpdateTask(Priority.HIGH) {
            @Override
            public ClusterState execute(ClusterState currentState) {
                rerouting.set(false);

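The call-site hunks above show one side of this commit's cluster state task rework: the Priority argument is dropped from submitStateUpdateTask(...) and travels with the task object instead. A minimal, hedged sketch of a migrated caller follows; the class name, the source string, and the no-op execute/onFailure bodies are illustrative only, not code from this commit.

---------------------------------------------------------------------------
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

// Hypothetical caller illustrating the new style: priority is carried by the task.
class RerouteSubmitterSketch {

    private final ESLogger logger = Loggers.getLogger(RerouteSubmitterSketch.class);

    void submit(ClusterService clusterService) {
        clusterService.submitStateUpdateTask("sketch_reroute", new ClusterStateUpdateTask(Priority.HIGH) {
            @Override
            public ClusterState execute(ClusterState currentState) {
                // placeholder: return the state unchanged
                return currentState;
            }

            @Override
            public void onFailure(String source, Throwable t) {
                logger.warn("[{}] failed", t, source);
            }
        });
    }
}
---------------------------------------------------------------------------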
@@ -308,12 +308,12 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi

        public RoutingTableDiff(RoutingTable before, RoutingTable after) {
            version = after.version;
            indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting);
            indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting, DiffableUtils.getStringKeySerializer());
        }

        public RoutingTableDiff(StreamInput in) throws IOException {
            version = in.readLong();
            indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, IndexRoutingTable.PROTO);
            indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), IndexRoutingTable.PROTO);
        }

        @Override

@@ -36,9 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;

@ -85,20 +83,18 @@ public class AllocationService extends AbstractComponent {
|
|||
if (withReroute) {
|
||||
reroute(allocation);
|
||||
}
|
||||
|
||||
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
|
||||
String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString());
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(result.metaData(), result.routingTable()),
|
||||
"shards started [" + startedShardsAsString + "] ..."
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.metaData(), result.routingTable()),
|
||||
"shards started [" + startedShardsAsString + "] ..."
|
||||
);
|
||||
return result;
|
||||
|
||||
}
|
||||
|
||||
|
||||
protected RoutingAllocation.Result buildChangedResult(MetaData metaData, RoutingNodes routingNodes) {
|
||||
return buildChangedResult(metaData, routingNodes, new RoutingExplanations());
|
||||
|
||||
|
@ -115,10 +111,10 @@ public class AllocationService extends AbstractComponent {
|
|||
*
|
||||
* @param currentMetaData {@link MetaData} object from before the routing table was changed.
|
||||
* @param newRoutingTable new {@link RoutingTable} created by the allocation change
|
||||
* @return adpated {@link MetaData}, potentially the original one if no change was needed.
|
||||
* @return adapted {@link MetaData}, potentially the original one if no change was needed.
|
||||
*/
|
||||
static MetaData updateMetaDataWithRoutingTable(MetaData currentMetaData, RoutingTable newRoutingTable) {
|
||||
// make sure index meta data and routing tables are in sync w.r.t primaryTerm
|
||||
// make sure index meta data and routing tables are in sync w.r.t active allocation ids
|
||||
MetaData.Builder metaDataBuilder = null;
|
||||
for (IndexRoutingTable indexRoutingTable : newRoutingTable) {
|
||||
final IndexMetaData indexMetaData = currentMetaData.index(indexRoutingTable.getIndex());
|
||||
|
@ -127,6 +123,26 @@ public class AllocationService extends AbstractComponent {
|
|||
}
|
||||
IndexMetaData.Builder indexMetaDataBuilder = null;
|
||||
for (IndexShardRoutingTable shardRoutings : indexRoutingTable) {
|
||||
|
||||
// update activeAllocationIds
|
||||
Set<String> activeAllocationIds = shardRoutings.activeShards().stream()
|
||||
.map(ShardRouting::allocationId)
|
||||
.filter(Objects::nonNull)
|
||||
.map(AllocationId::getId)
|
||||
.collect(Collectors.toSet());
|
||||
// only update active allocation ids if there is an active shard
|
||||
if (activeAllocationIds.isEmpty() == false) {
|
||||
// get currently stored allocation ids
|
||||
Set<String> storedAllocationIds = indexMetaData.activeAllocationIds(shardRoutings.shardId().id());
|
||||
if (activeAllocationIds.equals(storedAllocationIds) == false) {
|
||||
if (indexMetaDataBuilder == null) {
|
||||
indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
|
||||
}
|
||||
indexMetaDataBuilder.putActiveAllocationIds(shardRoutings.shardId().id(), activeAllocationIds);
|
||||
}
|
||||
}
|
||||
|
||||
// update primary terms
|
||||
final ShardRouting primary = shardRoutings.primaryShard();
|
||||
if (primary == null) {
|
||||
throw new IllegalStateException("missing primary shard for " + shardRoutings.shardId());
|
||||
|
@ -134,8 +150,8 @@ public class AllocationService extends AbstractComponent {
|
|||
final int shardId = primary.shardId().id();
|
||||
if (primary.primaryTerm() != indexMetaData.primaryTerm(shardId)) {
|
||||
assert primary.primaryTerm() > indexMetaData.primaryTerm(shardId) :
|
||||
"primary term should only increase. Index primary term ["
|
||||
+ indexMetaData.primaryTerm(shardId) + "] but primary routing is " + primary;
|
||||
"primary term should only increase. Index primary term ["
|
||||
+ indexMetaData.primaryTerm(shardId) + "] but primary routing is " + primary;
|
||||
if (indexMetaDataBuilder == null) {
|
||||
indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
|
||||
}
|
||||
|
@ -173,20 +189,19 @@ public class AllocationService extends AbstractComponent {
|
|||
boolean changed = false;
|
||||
for (FailedRerouteAllocation.FailedShard failedShard : failedShards) {
|
||||
changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,
|
||||
System.nanoTime(), System.currentTimeMillis()));
|
||||
System.nanoTime(), System.currentTimeMillis()));
|
||||
}
|
||||
if (!changed) {
|
||||
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
|
||||
}
|
||||
shardsAllocators.applyFailedShards(allocation);
|
||||
reroute(allocation);
|
||||
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
|
||||
final RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.shard.shardId().toString());
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), clusterState.routingTable()),
|
||||
"shards failed [" + failedShardsAsString + "] ..."
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
"shards failed [" + failedShardsAsString + "] ..."
|
||||
);
|
||||
return result;
|
||||
}
|
||||
|
@ -202,10 +217,10 @@ public class AllocationService extends AbstractComponent {
|
|||
private <T> String firstListElementsToCommaDelimitedString(List<T> elements, Function<T, String> formatter) {
|
||||
final int maxNumberOfElements = 10;
|
||||
return elements
|
||||
.stream()
|
||||
.limit(maxNumberOfElements)
|
||||
.map(formatter)
|
||||
.collect(Collectors.joining(", "));
|
||||
.stream()
|
||||
.limit(maxNumberOfElements)
|
||||
.map(formatter)
|
||||
.collect(Collectors.joining(", "));
|
||||
}
|
||||
|
||||
public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands) {
|
||||
|
@ -230,9 +245,9 @@ public class AllocationService extends AbstractComponent {
|
|||
reroute(allocation);
|
||||
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes, explanations);
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
"reroute commands"
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
"reroute commands"
|
||||
);
|
||||
return result;
|
||||
}
|
||||
|
@ -263,9 +278,9 @@ public class AllocationService extends AbstractComponent {
|
|||
}
|
||||
RoutingAllocation.Result result = buildChangedResult(clusterState.metaData(), routingNodes);
|
||||
logClusterHealthStateChange(
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
reason
|
||||
new ClusterStateHealth(clusterState),
|
||||
new ClusterStateHealth(clusterState.getMetaData(), result.routingTable()),
|
||||
reason
|
||||
);
|
||||
return result;
|
||||
}
|
||||
|
@ -377,8 +392,8 @@ public class AllocationService extends AbstractComponent {
|
|||
}
|
||||
for (ShardRouting shardToFail : shardsToFail) {
|
||||
changed |= applyFailedShard(allocation, shardToFail, false,
|
||||
new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
|
||||
null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
|
||||
new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "primary failed while replica initializing",
|
||||
null, allocation.getCurrentNanoTime(), System.currentTimeMillis()));
|
||||
}
|
||||
|
||||
// now, go over and elect a new primary if possible, not, from this code block on, if one is elected,
|
||||
|
@ -440,7 +455,7 @@ public class AllocationService extends AbstractComponent {
|
|||
// now, go over all the shards routing on the node, and fail them
|
||||
for (ShardRouting shardRouting : node.copyShards()) {
|
||||
UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]", null,
|
||||
allocation.getCurrentNanoTime(), System.currentTimeMillis());
|
||||
allocation.getCurrentNanoTime(), System.currentTimeMillis());
|
||||
applyFailedShard(allocation, shardRouting, false, unassignedInfo);
|
||||
}
|
||||
// its a dead node, remove it, note, its important to remove it *after* we apply failed shard
|
||||
|
|
|
@ -39,12 +39,12 @@ import static java.util.Collections.unmodifiableSet;
|
|||
/**
|
||||
* The {@link RoutingAllocation} keep the state of the current allocation
|
||||
* of shards and holds the {@link AllocationDeciders} which are responsible
|
||||
* for the current routing state.
|
||||
* for the current routing state.
|
||||
*/
|
||||
public class RoutingAllocation {
|
||||
|
||||
/**
|
||||
* this class is used to describe results of a {@link RoutingAllocation}
|
||||
* this class is used to describe results of a {@link RoutingAllocation}
|
||||
*/
|
||||
public static class Result {
|
||||
|
||||
|
@ -58,9 +58,10 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Creates a new {@link RoutingAllocation.Result}
|
||||
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
|
||||
*
|
||||
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
|
||||
* @param routingTable the {@link RoutingTable} this Result references
|
||||
* @param metaData the {@link MetaData} this result refrences
|
||||
* @param metaData the {@link MetaData} this Result references
|
||||
*/
|
||||
public Result(boolean changed, RoutingTable routingTable, MetaData metaData) {
|
||||
this.changed = changed;
|
||||
|
@ -70,9 +71,10 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Creates a new {@link RoutingAllocation.Result}
|
||||
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
|
||||
*
|
||||
* @param changed a flag to determine whether the actual {@link RoutingTable} has been changed
|
||||
* @param routingTable the {@link RoutingTable} this Result references
|
||||
* @param metaData the {@link MetaData} this Result references
|
||||
* @param metaData the {@link MetaData} this Result references
|
||||
* @param explanations Explanation for the reroute actions
|
||||
*/
|
||||
public Result(boolean changed, RoutingTable routingTable, MetaData metaData, RoutingExplanations explanations) {
|
||||
|
@ -82,7 +84,9 @@ public class RoutingAllocation {
|
|||
this.explanations = explanations;
|
||||
}
|
||||
|
||||
/** determine whether the actual {@link RoutingTable} has been changed
|
||||
/**
|
||||
* determine whether the actual {@link RoutingTable} has been changed
|
||||
*
|
||||
* @return <code>true</code> if the {@link RoutingTable} has been changed by allocation. Otherwise <code>false</code>
|
||||
*/
|
||||
public boolean changed() {
|
||||
|
@ -91,6 +95,7 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Get the {@link MetaData} referenced by this result
|
||||
*
|
||||
* @return referenced {@link MetaData}
|
||||
*/
|
||||
public MetaData metaData() {
|
||||
|
@ -99,6 +104,7 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Get the {@link RoutingTable} referenced by this result
|
||||
*
|
||||
* @return referenced {@link RoutingTable}
|
||||
*/
|
||||
public RoutingTable routingTable() {
|
||||
|
@ -107,6 +113,7 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Get the explanation of this result
|
||||
*
|
||||
* @return explanation
|
||||
*/
|
||||
public RoutingExplanations explanations() {
|
||||
|
@ -137,9 +144,10 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Creates a new {@link RoutingAllocation}
|
||||
* @param deciders {@link AllocationDeciders} to used to make decisions for routing allocations
|
||||
* @param routingNodes Routing nodes in the current cluster
|
||||
* @param nodes TODO: Documentation
|
||||
*
|
||||
* @param deciders {@link AllocationDeciders} to used to make decisions for routing allocations
|
||||
* @param routingNodes Routing nodes in the current cluster
|
||||
* @param nodes TODO: Documentation
|
||||
* @param currentNanoTime the nano time to use for all delay allocation calculation (typically {@link System#nanoTime()})
|
||||
*/
|
||||
public RoutingAllocation(AllocationDeciders deciders, RoutingNodes routingNodes, DiscoveryNodes nodes, ClusterInfo clusterInfo, long currentNanoTime) {
|
||||
|
@ -157,6 +165,7 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Get {@link AllocationDeciders} used for allocation
|
||||
*
|
||||
* @return {@link AllocationDeciders} used for allocation
|
||||
*/
|
||||
public AllocationDeciders deciders() {
|
||||
|
@ -165,6 +174,7 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Get routing table of current nodes
|
||||
*
|
||||
* @return current routing table
|
||||
*/
|
||||
public RoutingTable routingTable() {
|
||||
|
@ -173,6 +183,7 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Get current routing nodes
|
||||
*
|
||||
* @return routing nodes
|
||||
*/
|
||||
public RoutingNodes routingNodes() {
|
||||
|
@ -181,6 +192,7 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Get metadata of routing nodes
|
||||
*
|
||||
* @return Metadata of routing nodes
|
||||
*/
|
||||
public MetaData metaData() {
|
||||
|
@ -189,6 +201,7 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Get discovery nodes in current routing
|
||||
*
|
||||
* @return discovery nodes
|
||||
*/
|
||||
public DiscoveryNodes nodes() {
|
||||
|
@ -201,6 +214,7 @@ public class RoutingAllocation {
|
|||
|
||||
/**
|
||||
* Get explanations of current routing
|
||||
*
|
||||
* @return explanation of routing
|
||||
*/
|
||||
public AllocationExplanation explanation() {
|
||||
|
@ -257,10 +271,11 @@ public class RoutingAllocation {
|
|||
/**
|
||||
* Create a routing decision, including the reason if the debug flag is
|
||||
* turned on
|
||||
* @param decision decision whether to allow/deny allocation
|
||||
*
|
||||
* @param decision decision whether to allow/deny allocation
|
||||
* @param deciderLabel a human readable label for the AllocationDecider
|
||||
* @param reason a format string explanation of the decision
|
||||
* @param params format string parameters
|
||||
* @param reason a format string explanation of the decision
|
||||
* @param params format string parameters
|
||||
*/
|
||||
public Decision decision(Decision decision, String deciderLabel, String reason, Object... params) {
|
||||
if (debugDecision()) {
|
||||
|
|
|
@ -33,6 +33,7 @@ import org.elasticsearch.cluster.routing.OperationRouting;
|
|||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
|
@ -51,6 +52,8 @@ import org.elasticsearch.transport.TransportService;
|
|||
|
||||
import java.util.*;
|
||||
import java.util.concurrent.*;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
|
||||
|
||||
|
@ -87,6 +90,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
private final Collection<ClusterStateListener> priorityClusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateListener> lastClusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Map<ClusterStateTaskExecutor, List<UpdateTask>> updateTasksPerExecutor = new HashMap<>();
|
||||
// TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API
|
||||
private final Collection<ClusterStateListener> postAppliedListeners = new CopyOnWriteArrayList<>();
|
||||
private final Iterable<ClusterStateListener> preAppliedListeners = Iterables.concat(priorityClusterStateListeners, clusterStateListeners, lastClusterStateListeners);
|
||||
|
@@ -265,30 +269,34 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe

    @Override
    public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) {
        submitStateUpdateTask(source, Priority.NORMAL, updateTask);
        submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask);
    }


    @Override
    public void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask) {
    public <T> void submitStateUpdateTask(final String source, final T task,
                                          final ClusterStateTaskConfig config,
                                          final ClusterStateTaskExecutor<T> executor,
                                          final ClusterStateTaskListener listener
    ) {
        if (!lifecycle.started()) {
            return;
        }
        try {
            final UpdateTask task = new UpdateTask(source, priority, updateTask);
            if (updateTask.timeout() != null) {
                updateTasksExecutor.execute(task, threadPool.scheduler(), updateTask.timeout(), new Runnable() {
                    @Override
                    public void run() {
                        threadPool.generic().execute(new Runnable() {
                            @Override
                            public void run() {
                                updateTask.onFailure(task.source(), new ProcessClusterEventTimeoutException(updateTask.timeout(), task.source()));
                            }
                        });
            final UpdateTask<T> updateTask = new UpdateTask<>(source, task, config, executor, listener);

            synchronized (updateTasksPerExecutor) {
                updateTasksPerExecutor.computeIfAbsent(executor, k -> new ArrayList<>()).add(updateTask);
            }

            if (config.timeout() != null) {
                updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> {
                    if (updateTask.processed.getAndSet(true) == false) {
                        listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source));
                    }
                });
                }));
            } else {
                updateTasksExecutor.execute(task);
                updateTasksExecutor.execute(updateTask);
            }
        } catch (EsRejectedExecutionException e) {
            // ignore cases where we are shutting down..., there is really nothing interesting

@ -355,190 +363,240 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
}
|
||||
}
|
||||
|
||||
class UpdateTask extends SourcePrioritizedRunnable {
|
||||
|
||||
public final ClusterStateUpdateTask updateTask;
|
||||
|
||||
UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
|
||||
super(priority, source);
|
||||
this.updateTask = updateTask;
|
||||
<T> void runTasksForExecutor(ClusterStateTaskExecutor<T> executor) {
|
||||
final ArrayList<UpdateTask<T>> toExecute = new ArrayList<>();
|
||||
final ArrayList<String> sources = new ArrayList<>();
|
||||
synchronized (updateTasksPerExecutor) {
|
||||
List<UpdateTask> pending = updateTasksPerExecutor.remove(executor);
|
||||
if (pending != null) {
|
||||
for (UpdateTask<T> task : pending) {
|
||||
if (task.processed.getAndSet(true) == false) {
|
||||
logger.trace("will process [{}]", task.source);
|
||||
toExecute.add(task);
|
||||
sources.add(task.source);
|
||||
} else {
|
||||
logger.trace("skipping [{}], already processed", task.source);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (toExecute.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
final String source = Strings.collectionToCommaDelimitedString(sources);
|
||||
if (!lifecycle.started()) {
|
||||
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
|
||||
return;
|
||||
}
|
||||
logger.debug("processing [{}]: execute", source);
|
||||
ClusterState previousClusterState = clusterState;
|
||||
if (!previousClusterState.nodes().localNodeMaster() && executor.runOnlyOnMaster()) {
|
||||
logger.debug("failing [{}]: local node is no longer master", source);
|
||||
toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source));
|
||||
return;
|
||||
}
|
||||
ClusterStateTaskExecutor.BatchResult<T> batchResult;
|
||||
long startTimeNS = System.nanoTime();
|
||||
try {
|
||||
List<T> inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList());
|
||||
batchResult = executor.execute(previousClusterState, inputs);
|
||||
} catch (Throwable e) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
|
||||
sb.append(previousClusterState.nodes().prettyPrint());
|
||||
sb.append(previousClusterState.routingTable().prettyPrint());
|
||||
sb.append(previousClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.trace(sb.toString(), e);
|
||||
}
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
if (!lifecycle.started()) {
|
||||
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
|
||||
return;
|
||||
}
|
||||
logger.debug("processing [{}]: execute", source);
|
||||
ClusterState previousClusterState = clusterState;
|
||||
if (!previousClusterState.nodes().localNodeMaster() && updateTask.runOnlyOnMaster()) {
|
||||
logger.debug("failing [{}]: local node is no longer master", source);
|
||||
updateTask.onNoLongerMaster(source);
|
||||
return;
|
||||
}
|
||||
ClusterState newClusterState;
|
||||
long startTimeNS = System.nanoTime();
|
||||
try {
|
||||
newClusterState = updateTask.execute(previousClusterState);
|
||||
} catch (Throwable e) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
|
||||
sb.append(previousClusterState.nodes().prettyPrint());
|
||||
sb.append(previousClusterState.routingTable().prettyPrint());
|
||||
sb.append(previousClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.trace(sb.toString(), e);
|
||||
}
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
updateTask.onFailure(source, e);
|
||||
return;
|
||||
}
|
||||
assert batchResult.executionResults != null;
|
||||
|
||||
if (previousClusterState == newClusterState) {
|
||||
if (updateTask instanceof AckedClusterStateUpdateTask) {
|
||||
ClusterState newClusterState = batchResult.resultingState;
|
||||
final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>();
|
||||
// fail all tasks that have failed and extract those that are waiting for results
|
||||
for (UpdateTask<T> updateTask : toExecute) {
|
||||
assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString();
|
||||
final ClusterStateTaskExecutor.TaskResult executionResult =
|
||||
batchResult.executionResults.get(updateTask.task);
|
||||
executionResult.handle(() -> proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex));
|
||||
}
|
||||
|
||||
if (previousClusterState == newClusterState) {
|
||||
for (UpdateTask<T> task : proccessedListeners) {
|
||||
if (task.listener instanceof AckedClusterStateTaskListener) {
|
||||
//no need to wait for ack if nothing changed, the update can be counted as acknowledged
|
||||
((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null);
|
||||
((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null);
|
||||
}
|
||||
updateTask.clusterStateProcessed(source, previousClusterState, newClusterState);
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
return;
|
||||
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
|
||||
}
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
Discovery.AckListener ackListener = new NoOpAckListener();
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
// only the master controls the version numbers
|
||||
Builder builder = ClusterState.builder(newClusterState).incrementVersion();
|
||||
if (previousClusterState.routingTable() != newClusterState.routingTable()) {
|
||||
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build());
|
||||
}
|
||||
if (previousClusterState.metaData() != newClusterState.metaData()) {
|
||||
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
|
||||
}
|
||||
newClusterState = builder.build();
|
||||
|
||||
if (updateTask instanceof AckedClusterStateUpdateTask) {
|
||||
final AckedClusterStateUpdateTask ackedUpdateTask = (AckedClusterStateUpdateTask) updateTask;
|
||||
if (ackedUpdateTask.ackTimeout() == null || ackedUpdateTask.ackTimeout().millis() == 0) {
|
||||
ackedUpdateTask.onAckTimeout();
|
||||
try {
|
||||
ArrayList<Discovery.AckListener> ackListeners = new ArrayList<>();
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
// only the master controls the version numbers
|
||||
Builder builder = ClusterState.builder(newClusterState).incrementVersion();
|
||||
if (previousClusterState.routingTable() != newClusterState.routingTable()) {
|
||||
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build());
|
||||
}
|
||||
if (previousClusterState.metaData() != newClusterState.metaData()) {
|
||||
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
|
||||
}
|
||||
newClusterState = builder.build();
|
||||
for (UpdateTask<T> task : proccessedListeners) {
|
||||
if (task.listener instanceof AckedClusterStateTaskListener) {
|
||||
final AckedClusterStateTaskListener ackedListener = (AckedClusterStateTaskListener) task.listener;
|
||||
if (ackedListener.ackTimeout() == null || ackedListener.ackTimeout().millis() == 0) {
|
||||
ackedListener.onAckTimeout();
|
||||
} else {
|
||||
try {
|
||||
ackListener = new AckCountDownListener(ackedUpdateTask, newClusterState.version(), newClusterState.nodes(), threadPool);
|
||||
ackListeners.add(new AckCountDownListener(ackedListener, newClusterState.version(), newClusterState.nodes(), threadPool));
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex);
|
||||
}
|
||||
//timeout straightaway, otherwise we could wait forever as the timeout thread has not started
|
||||
ackedUpdateTask.onAckTimeout();
|
||||
ackedListener.onAckTimeout();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.prettyPrint());
|
||||
logger.trace(sb.toString());
|
||||
} else if (logger.isDebugEnabled()) {
|
||||
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
|
||||
}
|
||||
|
||||
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
|
||||
// new cluster state, notify all listeners
|
||||
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
|
||||
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
|
||||
String summary = nodesDelta.shortSummary();
|
||||
if (summary.length() > 0) {
|
||||
logger.info("{}, reason: {}", summary, source);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO, do this in parallel (and wait)
|
||||
for (DiscoveryNode node : nodesDelta.addedNodes()) {
|
||||
if (!nodeRequiresConnection(node)) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
transportService.connectToNode(node);
|
||||
} catch (Throwable e) {
|
||||
// the fault detection will detect it as failed as well
|
||||
logger.warn("failed to connect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
// if we are the master, publish the new state to all nodes
|
||||
// we publish here before we send a notification to all the listeners, since if it fails
|
||||
// we don't want to notify
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
logger.debug("publishing cluster state version [{}]", newClusterState.version());
|
||||
try {
|
||||
discoveryService.publish(clusterChangedEvent, ackListener);
|
||||
} catch (Discovery.FailedToCommitClusterStateException t) {
|
||||
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version());
|
||||
updateTask.onFailure(source, t);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// update the current cluster state
|
||||
clusterState = newClusterState;
|
||||
logger.debug("set local cluster state to version {}", newClusterState.version());
|
||||
for (ClusterStateListener listener : preAppliedListeners) {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version());
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
for (DiscoveryNode node : nodesDelta.removedNodes()) {
|
||||
try {
|
||||
transportService.disconnectFromNode(node);
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to disconnect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);
|
||||
|
||||
for (ClusterStateListener listener : postAppliedListeners) {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version());
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
//manual ack only from the master at the end of the publish
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
try {
|
||||
ackListener.onNodeAck(newClusterState.nodes().localNode(), null);
|
||||
} catch (Throwable t) {
|
||||
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
|
||||
}
|
||||
}
|
||||
|
||||
updateTask.clusterStateProcessed(source, previousClusterState, newClusterState);
|
||||
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
} catch (Throwable t) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.nodes().prettyPrint());
|
||||
sb.append(newClusterState.routingTable().prettyPrint());
|
||||
sb.append(newClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.warn(sb.toString(), t);
|
||||
// TODO: do we want to call updateTask.onFailure here?
|
||||
}
|
||||
final Discovery.AckListener ackListener = new DelegetingAckListener(ackListeners);
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.prettyPrint());
|
||||
logger.trace(sb.toString());
|
||||
} else if (logger.isDebugEnabled()) {
|
||||
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
|
||||
}
|
||||
|
||||
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
|
||||
// new cluster state, notify all listeners
|
||||
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
|
||||
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
|
||||
String summary = nodesDelta.shortSummary();
|
||||
if (summary.length() > 0) {
|
||||
logger.info("{}, reason: {}", summary, source);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO, do this in parallel (and wait)
|
||||
for (DiscoveryNode node : nodesDelta.addedNodes()) {
|
||||
if (!nodeRequiresConnection(node)) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
transportService.connectToNode(node);
|
||||
} catch (Throwable e) {
|
||||
// the fault detection will detect it as failed as well
|
||||
logger.warn("failed to connect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
// if we are the master, publish the new state to all nodes
|
||||
// we publish here before we send a notification to all the listeners, since if it fails
|
||||
// we don't want to notify
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
logger.debug("publishing cluster state version [{}]", newClusterState.version());
|
||||
try {
|
||||
discoveryService.publish(clusterChangedEvent, ackListener);
|
||||
} catch (Discovery.FailedToCommitClusterStateException t) {
|
||||
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version());
|
||||
proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// update the current cluster state
|
||||
clusterState = newClusterState;
|
||||
logger.debug("set local cluster state to version {}", newClusterState.version());
|
||||
for (ClusterStateListener listener : preAppliedListeners) {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version());
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
for (DiscoveryNode node : nodesDelta.removedNodes()) {
|
||||
try {
|
||||
transportService.disconnectFromNode(node);
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to disconnect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);
|
||||
|
||||
for (ClusterStateListener listener : postAppliedListeners) {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version());
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
//manual ack only from the master at the end of the publish
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
try {
|
||||
ackListener.onNodeAck(newClusterState.nodes().localNode(), null);
|
||||
} catch (Throwable t) {
|
||||
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
|
||||
}
|
||||
}
|
||||
|
||||
for (UpdateTask<T> task : proccessedListeners) {
|
||||
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
|
||||
}
|
||||
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
} catch (Throwable t) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.nodes().prettyPrint());
|
||||
sb.append(newClusterState.routingTable().prettyPrint());
|
||||
sb.append(newClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.warn(sb.toString(), t);
|
||||
// TODO: do we want to call updateTask.onFailure here?
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
class UpdateTask<T> extends SourcePrioritizedRunnable {
|
||||
|
||||
public final T task;
|
||||
public final ClusterStateTaskConfig config;
|
||||
public final ClusterStateTaskExecutor<T> executor;
|
||||
public final ClusterStateTaskListener listener;
|
||||
public final AtomicBoolean processed = new AtomicBoolean();
|
||||
|
||||
UpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {
|
||||
super(config.priority(), source);
|
||||
this.task = task;
|
||||
this.config = config;
|
||||
this.executor = executor;
|
||||
this.listener = listener;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
runTasksForExecutor(executor);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -707,13 +765,24 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
}
|
||||
}
|
||||
|
||||
private static class NoOpAckListener implements Discovery.AckListener {
|
||||
private static class DelegetingAckListener implements Discovery.AckListener {
|
||||
|
||||
final private List<Discovery.AckListener> listeners;
|
||||
|
||||
private DelegetingAckListener(List<Discovery.AckListener> listeners) {
|
||||
this.listeners = listeners;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
|
||||
for (Discovery.AckListener listener : listeners) {
|
||||
listener.onNodeAck(node, t);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onTimeout() {
|
||||
throw new UnsupportedOperationException("no timeout delegation");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -721,20 +790,20 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
|
||||
private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class);
|
||||
|
||||
private final AckedClusterStateUpdateTask ackedUpdateTask;
|
||||
private final AckedClusterStateTaskListener ackedTaskListener;
|
||||
private final CountDown countDown;
|
||||
private final DiscoveryNodes nodes;
|
||||
private final long clusterStateVersion;
|
||||
private final Future<?> ackTimeoutCallback;
|
||||
private Throwable lastFailure;
|
||||
|
||||
AckCountDownListener(AckedClusterStateUpdateTask ackedUpdateTask, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
|
||||
this.ackedUpdateTask = ackedUpdateTask;
|
||||
AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
|
||||
this.ackedTaskListener = ackedTaskListener;
|
||||
this.clusterStateVersion = clusterStateVersion;
|
||||
this.nodes = nodes;
|
||||
int countDown = 0;
|
||||
for (DiscoveryNode node : nodes) {
|
||||
if (ackedUpdateTask.mustAck(node)) {
|
||||
if (ackedTaskListener.mustAck(node)) {
|
||||
countDown++;
|
||||
}
|
||||
}
|
||||
|
@ -742,7 +811,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
countDown = Math.max(1, countDown);
|
||||
logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
|
||||
this.countDown = new CountDown(countDown);
|
||||
this.ackTimeoutCallback = threadPool.schedule(ackedUpdateTask.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
|
||||
this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
onTimeout();
|
||||
|
@ -752,7 +821,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
|
||||
@Override
|
||||
public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
|
||||
if (!ackedUpdateTask.mustAck(node)) {
|
||||
if (!ackedTaskListener.mustAck(node)) {
|
||||
//we always wait for the master ack anyway
|
||||
if (!node.equals(nodes.masterNode())) {
|
||||
return;
|
||||
|
@ -768,7 +837,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
if (countDown.countDown()) {
|
||||
logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion);
|
||||
FutureUtils.cancel(ackTimeoutCallback);
|
||||
ackedUpdateTask.onAllNodesAcked(lastFailure);
|
||||
ackedTaskListener.onAllNodesAcked(lastFailure);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -776,7 +845,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
public void onTimeout() {
|
||||
if (countDown.fastForward()) {
|
||||
logger.trace("timeout waiting for acknowledgement for cluster_state update (version: {})", clusterStateVersion);
|
||||
ackedUpdateTask.onAckTimeout();
|
||||
ackedTaskListener.onAckTimeout();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -788,5 +857,4 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
InternalClusterService.this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -643,7 +643,8 @@ public class Base64 {
        try {
            encoded = encodeBytes(source, 0, source.length, NO_OPTIONS);
        } catch (java.io.IOException ex) {
            assert false : ex.getMessage();
            // not sure why this was an assertion before, running with assertions disabled would mean swallowing this exception
            throw new IllegalStateException(ex);
        } // end catch
        assert encoded != null;
        return encoded;

@@ -705,7 +706,7 @@ public class Base64 {
        try {
            encoded = encodeBytes(source, off, len, NO_OPTIONS);
        } catch (java.io.IOException ex) {
            assert false : ex.getMessage();
            throw new IllegalStateException(ex);
        } // end catch
        assert encoded != null;
        return encoded;

@@ -766,7 +767,7 @@ public class Base64 {
        try {
            encoded = encodeBytesToBytes(source, 0, source.length, Base64.NO_OPTIONS);
        } catch (java.io.IOException ex) {
            assert false : "IOExceptions only come from GZipping, which is turned off: " + ex.getMessage();
            throw new IllegalStateException("IOExceptions only come from GZipping, which is turned off: ", ex);
        }
        return encoded;
    }

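The Base64 hunks above replace assertion-only error handling with an explicit IllegalStateException: with assertions disabled (the JVM default) the old catch block silently swallowed the IOException and let a null result escape. A minimal, hedged sketch of the difference; the encode(...) helper and class name are illustrative, not part of the commit.

---------------------------------------------------------------------------
import java.io.IOException;

class AssertVersusThrowSketch {

    // stand-in for a call that declares IOException but never actually throws it
    static String encode(byte[] source) throws IOException {
        return java.util.Base64.getEncoder().encodeToString(source);
    }

    // old pattern: without -ea the assert is a no-op and null leaks to the caller
    static String encodeOldStyle(byte[] source) {
        String encoded = null;
        try {
            encoded = encode(source);
        } catch (IOException ex) {
            assert false : ex.getMessage();
        }
        return encoded;
    }

    // new pattern: the "impossible" failure surfaces immediately and loudly
    static String encodeNewStyle(byte[] source) {
        try {
            return encode(source);
        } catch (IOException ex) {
            throw new IllegalStateException(ex);
        }
    }
}
---------------------------------------------------------------------------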
@@ -65,8 +65,8 @@ public class MacAddressProvider {
        byte[] address = null;
        try {
            address = getMacAddress();
        } catch( SocketException se ) {
            logger.warn("Unable to get mac address, will use a dummy address", se);
        } catch (Throwable t) {
            logger.warn("Unable to get mac address, will use a dummy address", t);
            // address will be set below
        }

@@ -116,7 +116,7 @@ public abstract class Terminal {
    }

    public void printError(Throwable t) {
        printError("%s", t.getMessage());
        printError("%s", t.toString());
        if (isDebugEnabled) {
            printStackTrace(t);
        }

@ -23,7 +23,6 @@ import org.apache.lucene.store.IndexInput;
|
|||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.compress.deflate.DeflateCompressor;
|
||||
import org.elasticsearch.common.compress.lzf.LZFCompressor;
|
||||
import org.elasticsearch.common.io.Streams;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -42,7 +41,6 @@ public class CompressorFactory {
|
|||
|
||||
static {
|
||||
compressors = new Compressor[] {
|
||||
new LZFCompressor(),
|
||||
new DeflateCompressor()
|
||||
};
|
||||
defaultCompressor = new DeflateCompressor();
|
||||
|
@@ -82,12 +80,23 @@ public class CompressorFactory {

        XContentType contentType = XContentFactory.xContentType(bytes);
        if (contentType == null) {
            if (isAncient(bytes)) {
                throw new IllegalStateException("unsupported compression: index was created before v2.0.0.beta1 and wasn't upgraded?");
            }
            throw new NotXContentException("Compressor detection can only be called on some xcontent bytes or compressed xcontent bytes");
        }

        return null;
    }

    /** true if the bytes were compressed with LZF: only used before elasticsearch 2.0 */
    private static boolean isAncient(BytesReference bytes) {
        return bytes.length() >= 3 &&
               bytes.get(0) == 'Z' &&
               bytes.get(1) == 'V' &&
               (bytes.get(2) == 0 || bytes.get(2) == 1);
    }

    public static Compressor compressor(ChannelBuffer buffer) {
        for (Compressor compressor : compressors) {
            if (compressor.isCompressed(buffer)) {

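The new isAncient(...) check above, together with the LZF classes removed below, pins down the legacy on-disk marker this commit stops supporting: an LZF chunk starts with the bytes 'Z', 'V' and a block-type byte of 0 or 1. A small, hedged sketch of that header test over a plain byte array; the method and class names are illustrative only.

---------------------------------------------------------------------------
class LzfHeaderSketch {

    // true if the first three bytes look like a pre-2.0 LZF-compressed chunk
    static boolean looksLikeLegacyLzf(byte[] b) {
        return b.length >= 3
                && b[0] == 'Z'
                && b[1] == 'V'
                && (b[2] == 0 || b[2] == 1);
    }

    public static void main(String[] args) {
        byte[] lzf = {'Z', 'V', 1, 42};
        byte[] json = {'{', '"', 'a', '"'};
        System.out.println(looksLikeLegacyLzf(lzf));   // true
        System.out.println(looksLikeLegacyLzf(json));  // false
    }
}
---------------------------------------------------------------------------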
@ -1,80 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress.lzf;
|
||||
|
||||
import com.ning.compress.lzf.ChunkDecoder;
|
||||
import com.ning.compress.lzf.LZFChunk;
|
||||
import org.apache.lucene.store.BufferedIndexInput;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.elasticsearch.common.compress.CompressedIndexInput;
|
||||
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
|
||||
*/
|
||||
@Deprecated
|
||||
public class LZFCompressedIndexInput extends CompressedIndexInput {
|
||||
|
||||
private final ChunkDecoder decoder;
|
||||
// scratch area buffer
|
||||
private byte[] inputBuffer;
|
||||
|
||||
public LZFCompressedIndexInput(IndexInput in, ChunkDecoder decoder) throws IOException {
|
||||
super(in);
|
||||
|
||||
this.decoder = decoder;
|
||||
this.uncompressed = new byte[LZFChunk.MAX_CHUNK_LEN];
|
||||
this.uncompressedLength = LZFChunk.MAX_CHUNK_LEN;
|
||||
this.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void readHeader(IndexInput in) throws IOException {
|
||||
byte[] header = new byte[LZFCompressor.LUCENE_HEADER.length];
|
||||
in.readBytes(header, 0, header.length, false);
|
||||
if (!Arrays.equals(header, LZFCompressor.LUCENE_HEADER)) {
|
||||
throw new IOException("wrong lzf compressed header [" + Arrays.toString(header) + "]");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int uncompress(IndexInput in, byte[] out) throws IOException {
|
||||
return decoder.decodeChunk(new InputStreamIndexInput(in, Long.MAX_VALUE), inputBuffer, out);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() throws IOException {
|
||||
// nothing to do here...
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexInput clone() {
|
||||
LZFCompressedIndexInput cloned = (LZFCompressedIndexInput) super.clone();
|
||||
cloned.inputBuffer = new byte[LZFChunk.MAX_CHUNK_LEN];
|
||||
return cloned;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexInput slice(String description, long offset, long length) throws IOException {
|
||||
return BufferedIndexInput.wrap(description, this, offset, length);
|
||||
}
|
||||
}
|
|
@ -1,73 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.compress.lzf;
|
||||
|
||||
import com.ning.compress.BufferRecycler;
|
||||
import com.ning.compress.lzf.ChunkDecoder;
|
||||
import com.ning.compress.lzf.LZFChunk;
|
||||
import org.elasticsearch.common.compress.CompressedStreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
*/
|
||||
public class LZFCompressedStreamInput extends CompressedStreamInput {
|
||||
|
||||
private final BufferRecycler recycler;
|
||||
|
||||
private final ChunkDecoder decoder;
|
||||
|
||||
// scratch area buffer
|
||||
private byte[] inputBuffer;
|
||||
|
||||
public LZFCompressedStreamInput(StreamInput in, ChunkDecoder decoder) throws IOException {
|
||||
super(in);
|
||||
this.recycler = BufferRecycler.instance();
|
||||
this.decoder = decoder;
|
||||
|
||||
this.uncompressed = recycler.allocDecodeBuffer(LZFChunk.MAX_CHUNK_LEN);
|
||||
this.inputBuffer = recycler.allocInputBuffer(LZFChunk.MAX_CHUNK_LEN);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readHeader(StreamInput in) throws IOException {
|
||||
// nothing to do here, each chunk has a header
|
||||
}
|
||||
|
||||
@Override
|
||||
public int uncompress(StreamInput in, byte[] out) throws IOException {
|
||||
return decoder.decodeChunk(in, inputBuffer, out);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() throws IOException {
|
||||
byte[] buf = inputBuffer;
|
||||
if (buf != null) {
|
||||
inputBuffer = null;
|
||||
recycler.releaseInputBuffer(buf);
|
||||
}
|
||||
buf = uncompressed;
|
||||
if (buf != null) {
|
||||
uncompressed = null;
|
||||
recycler.releaseDecodeBuffer(uncompressed);
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,100 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.compress.lzf;

import com.ning.compress.lzf.ChunkDecoder;
import com.ning.compress.lzf.LZFChunk;
import com.ning.compress.lzf.util.ChunkDecoderFactory;
import org.apache.lucene.store.IndexInput;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedIndexInput;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.deflate.DeflateCompressor;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.Loggers;
import org.jboss.netty.buffer.ChannelBuffer;

import java.io.IOException;

/**
 * @deprecated Use {@link DeflateCompressor} instead
 */
@Deprecated
public class LZFCompressor implements Compressor {

    static final byte[] LUCENE_HEADER = {'L', 'Z', 'F', 0};

    private ChunkDecoder decoder;

    public LZFCompressor() {
        this.decoder = ChunkDecoderFactory.safeInstance();
        Loggers.getLogger(LZFCompressor.class).debug("using decoder[{}] ", this.decoder.getClass().getSimpleName());
    }

    @Override
    public boolean isCompressed(BytesReference bytes) {
        return bytes.length() >= 3 &&
                bytes.get(0) == LZFChunk.BYTE_Z &&
                bytes.get(1) == LZFChunk.BYTE_V &&
                (bytes.get(2) == LZFChunk.BLOCK_TYPE_COMPRESSED || bytes.get(2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
    }

    @Override
    public boolean isCompressed(ChannelBuffer buffer) {
        int offset = buffer.readerIndex();
        return buffer.readableBytes() >= 3 &&
                buffer.getByte(offset) == LZFChunk.BYTE_Z &&
                buffer.getByte(offset + 1) == LZFChunk.BYTE_V &&
                (buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_COMPRESSED || buffer.getByte(offset + 2) == LZFChunk.BLOCK_TYPE_NON_COMPRESSED);
    }

    @Override
    public boolean isCompressed(IndexInput in) throws IOException {
        long currentPointer = in.getFilePointer();
        // since we have some metdata before the first compressed header, we check on our specific header
        if (in.length() - currentPointer < (LUCENE_HEADER.length)) {
            return false;
        }
        for (int i = 0; i < LUCENE_HEADER.length; i++) {
            if (in.readByte() != LUCENE_HEADER[i]) {
                in.seek(currentPointer);
                return false;
            }
        }
        in.seek(currentPointer);
        return true;
    }

    @Override
    public StreamInput streamInput(StreamInput in) throws IOException {
        return new LZFCompressedStreamInput(in, decoder);
    }

    @Override
    public StreamOutput streamOutput(StreamOutput out) throws IOException {
        throw new UnsupportedOperationException("LZF is only here for back compat, no write support");
    }

    @Override
    public CompressedIndexInput indexInput(IndexInput in) throws IOException {
        return new LZFCompressedIndexInput(in, decoder);
    }
}
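The three isCompressed overloads in the removed LZFCompressor all test the same three-byte chunk signature. As a hedged illustration (not part of this commit), a self-contained check over a plain byte array could look like the sketch below; the constants are assumed to mirror the compress-lzf LZFChunk values ('Z', 'V', then a block-type byte of 0 or 1), and the class and method names are made up for the example.

----
// Illustrative sketch only; constants are assumed to mirror LZFChunk.
public final class LzfSignatureCheck {

    private static final byte BYTE_Z = 'Z';
    private static final byte BYTE_V = 'V';
    private static final byte BLOCK_TYPE_NON_COMPRESSED = 0; // assumed value
    private static final byte BLOCK_TYPE_COMPRESSED = 1;     // assumed value

    // True if the buffer starts with an LZF chunk header: 'Z', 'V', block type.
    static boolean looksLikeLzf(byte[] bytes) {
        return bytes.length >= 3
                && bytes[0] == BYTE_Z
                && bytes[1] == BYTE_V
                && (bytes[2] == BLOCK_TYPE_COMPRESSED || bytes[2] == BLOCK_TYPE_NON_COMPRESSED);
    }

    public static void main(String[] args) {
        System.out.println(looksLikeLzf(new byte[] {'Z', 'V', 1, 42})); // true
        System.out.println(looksLikeLzf("plain text".getBytes(java.nio.charset.StandardCharsets.UTF_8))); // false
    }
}
----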
@@ -21,24 +21,30 @@ package org.elasticsearch.common.geo.builders;

import com.spatial4j.core.shape.Circle;
import com.vividsolutions.jts.geom.Coordinate;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.unit.DistanceUnit.Distance;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Objects;

public class CircleBuilder extends ShapeBuilder {

    public static final String FIELD_RADIUS = "radius";
    public static final GeoShapeType TYPE = GeoShapeType.CIRCLE;

    public static final CircleBuilder PROTOTYPE = new CircleBuilder();

    private DistanceUnit unit;
    private double radius;
    private Coordinate center;


    /**
     * Set the center of the circle
     *
     *
     * @param center coordinate of the circles center
     * @return this
     */
@@ -57,6 +63,13 @@ public class CircleBuilder extends ShapeBuilder {
        return center(new Coordinate(lon, lat));
    }

    /**
     * Get the center of the circle
     */
    public Coordinate center() {
        return center;
    }

    /**
     * Set the radius of the circle. The String value will be parsed by {@link DistanceUnit}
     * @param radius Value and unit of the circle combined in a string
@@ -97,10 +110,24 @@ public class CircleBuilder extends ShapeBuilder {
        return this;
    }

    /**
     * Get the radius of the circle without unit
     */
    public double radius() {
        return this.radius;
    }

    /**
     * Get the radius unit of the circle
     */
    public DistanceUnit unit() {
        return this.unit;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(FIELD_TYPE, TYPE.shapename);
        builder.field(FIELD_TYPE, TYPE.shapeName());
        builder.field(FIELD_RADIUS, unit.toString(radius));
        builder.field(FIELD_COORDINATES);
        toXContent(builder, center);
@@ -116,4 +143,37 @@ public class CircleBuilder extends ShapeBuilder {
    public GeoShapeType type() {
        return TYPE;
    }

    @Override
    public int hashCode() {
        return Objects.hash(center, radius, unit.ordinal());
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        CircleBuilder other = (CircleBuilder) obj;
        return Objects.equals(center, other.center) &&
                Objects.equals(radius, other.radius) &&
                Objects.equals(unit.ordinal(), other.unit.ordinal());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        writeCoordinateTo(center, out);
        out.writeDouble(radius);
        DistanceUnit.writeDistanceUnit(out, unit);
    }

    @Override
    public CircleBuilder readFrom(StreamInput in) throws IOException {
        return new CircleBuilder()
                .center(readCoordinateFrom(in))
                .radius(in.readDouble(), DistanceUnit.readDistanceUnit(in));
    }
}
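CircleBuilder now follows the pattern shared by the geo builders in this commit: Objects.hash/Objects.equals for value semantics plus a writeTo/readFrom pair that streams the fields in a fixed order. The sketch below illustrates that round trip with plain java.io streams; the SimpleCircle type and its fields are invented for the example and do not use the Elasticsearch StreamInput/StreamOutput API.

----
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Objects;

// Illustrative value type: center (x, y) and radius, mirroring the builder pattern above.
final class SimpleCircle {
    final double x, y, radius;

    SimpleCircle(double x, double y, double radius) {
        this.x = x; this.y = y; this.radius = radius;
    }

    // Fields are written in a fixed order, just as writeTo does above.
    void writeTo(DataOutputStream out) throws IOException {
        out.writeDouble(x);
        out.writeDouble(y);
        out.writeDouble(radius);
    }

    // readFrom must consume the fields in the same order they were written.
    static SimpleCircle readFrom(DataInputStream in) throws IOException {
        return new SimpleCircle(in.readDouble(), in.readDouble(), in.readDouble());
    }

    @Override public int hashCode() { return Objects.hash(x, y, radius); }

    @Override public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null || getClass() != obj.getClass()) return false;
        SimpleCircle other = (SimpleCircle) obj;
        return x == other.x && y == other.y && radius == other.radius;
    }

    public static void main(String[] args) throws IOException {
        SimpleCircle original = new SimpleCircle(10.0, 20.0, 5.0);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.writeTo(new DataOutputStream(bytes));
        SimpleCircle copy = readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(original.equals(copy)); // true: the round trip preserves equality
    }
}
----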
@@ -21,13 +21,19 @@ package org.elasticsearch.common.geo.builders;

import com.spatial4j.core.shape.Rectangle;
import com.vividsolutions.jts.geom.Coordinate;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Locale;
import java.util.Objects;

public class EnvelopeBuilder extends ShapeBuilder {

    public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE;
    public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE;
    public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder();

    protected Coordinate topLeft;
    protected Coordinate bottomRight;
@@ -61,7 +67,8 @@ public class EnvelopeBuilder extends ShapeBuilder {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(FIELD_TYPE, TYPE.shapename);
        builder.field(FIELD_TYPE, TYPE.shapeName());
        builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT));
        builder.startArray(FIELD_COORDINATES);
        toXContent(builder, topLeft);
        toXContent(builder, bottomRight);
@@ -78,4 +85,38 @@ public class EnvelopeBuilder extends ShapeBuilder {
    public GeoShapeType type() {
        return TYPE;
    }

    @Override
    public int hashCode() {
        return Objects.hash(orientation, topLeft, bottomRight);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        EnvelopeBuilder other = (EnvelopeBuilder) obj;
        return Objects.equals(orientation, other.orientation) &&
                Objects.equals(topLeft, other.topLeft) &&
                Objects.equals(bottomRight, other.bottomRight);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeBoolean(orientation == Orientation.RIGHT);
        writeCoordinateTo(topLeft, out);
        writeCoordinateTo(bottomRight, out);
    }

    @Override
    public EnvelopeBuilder readFrom(StreamInput in) throws IOException {
        Orientation orientation = in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT;
        return new EnvelopeBuilder(orientation)
                .topLeft(readCoordinateFrom(in))
                .bottomRight(readCoordinateFrom(in));
    }
}
@@ -102,7 +102,7 @@ public class GeometryCollectionBuilder extends ShapeBuilder {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(FIELD_TYPE, TYPE.shapename);
        builder.field(FIELD_TYPE, TYPE.shapeName());
        builder.startArray(FIELD_GEOMETRIES);
        for (ShapeBuilder shape : shapes) {
            shape.toXContent(builder, params);
@@ -34,12 +34,10 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> {

    public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;

    protected boolean translated = false;

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(FIELD_TYPE, TYPE.shapename);
        builder.field(FIELD_TYPE, TYPE.shapeName());
        builder.field(FIELD_COORDINATES);
        coordinatesToXcontent(builder, false);
        builder.endObject();
@@ -57,7 +57,7 @@ public class MultiLineStringBuilder extends ShapeBuilder {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(FIELD_TYPE, TYPE.shapename);
        builder.field(FIELD_TYPE, TYPE.shapeName());
        builder.field(FIELD_COORDINATES);
        builder.startArray();
        for(LineStringBuilder line : lines) {
@@ -37,7 +37,7 @@ public class MultiPointBuilder extends PointCollection<MultiPointBuilder> {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(FIELD_TYPE, TYPE.shapename);
        builder.field(FIELD_TYPE, TYPE.shapeName());
        builder.field(FIELD_COORDINATES);
        super.coordinatesToXcontent(builder, false);
        builder.endObject();
@@ -51,7 +51,7 @@ public class MultiPolygonBuilder extends ShapeBuilder {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(FIELD_TYPE, TYPE.shapename);
        builder.field(FIELD_TYPE, TYPE.shapeName());
        builder.startArray(FIELD_COORDINATES);
        for(PolygonBuilder polygon : polygons) {
            builder.startArray();
@@ -89,6 +89,4 @@ public class MultiPolygonBuilder extends ShapeBuilder {
        return new XShapeCollection<>(shapes, SPATIAL_CONTEXT);
        //note: ShapeCollection is probably faster than a Multi* geom.
    }


}
@@ -20,7 +20,10 @@
package org.elasticsearch.common.geo.builders;

import java.io.IOException;
import java.util.Objects;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;

import com.spatial4j.core.shape.Point;
@@ -30,6 +33,8 @@ public class PointBuilder extends ShapeBuilder {

    public static final GeoShapeType TYPE = GeoShapeType.POINT;

    public static final PointBuilder PROTOTYPE = new PointBuilder();

    private Coordinate coordinate;

    public PointBuilder coordinate(Coordinate coordinate) {
@@ -48,10 +53,10 @@ public class PointBuilder extends ShapeBuilder {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(FIELD_TYPE, TYPE.shapename);
        builder.field(FIELD_TYPE, TYPE.shapeName());
        builder.field(FIELD_COORDINATES);
        toXContent(builder, coordinate);
        return builder.endObject();
        return builder.endObject();
    }

    @Override
@@ -63,4 +68,31 @@ public class PointBuilder extends ShapeBuilder {
    public GeoShapeType type() {
        return TYPE;
    }

    @Override
    public int hashCode() {
        return Objects.hash(coordinate);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        PointBuilder other = (PointBuilder) obj;
        return Objects.equals(coordinate, other.coordinate);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        writeCoordinateTo(coordinate, out);
    }

    @Override
    public PointBuilder readFrom(StreamInput in) throws IOException {
        return new PointBuilder().coordinate(readCoordinateFrom(in));
    }
}
@@ -38,6 +38,7 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * The {@link PolygonBuilder} implements the groundwork to create polygons. This contains
@@ -141,9 +142,10 @@ public class PolygonBuilder extends ShapeBuilder {

        Edge[] edges = new Edge[numEdges];
        Edge[] holeComponents = new Edge[holes.size()];
        int offset = createEdges(0, orientation, shell, null, edges, 0);
        final AtomicBoolean translated = new AtomicBoolean(false);
        int offset = createEdges(0, orientation, shell, null, edges, 0, translated);
        for (int i = 0; i < holes.size(); i++) {
            int length = createEdges(i+1, orientation, shell, this.holes.get(i), edges, offset);
            int length = createEdges(i+1, orientation, shell, this.holes.get(i), edges, offset, translated);
            holeComponents[i] = edges[offset];
            offset += length;
        }
@@ -172,7 +174,7 @@ public class PolygonBuilder extends ShapeBuilder {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(FIELD_TYPE, TYPE.shapename);
        builder.field(FIELD_TYPE, TYPE.shapeName());
        builder.startArray(FIELD_COORDINATES);
        coordinatesArray(builder, params);
        builder.endArray();
@@ -508,14 +510,157 @@ public class PolygonBuilder extends ShapeBuilder {
    }

    private static int createEdges(int component, Orientation orientation, LineStringBuilder shell,
                                   LineStringBuilder hole,
                                   Edge[] edges, int offset) {
                                   LineStringBuilder hole, Edge[] edges, int offset, final AtomicBoolean translated) {
        // inner rings (holes) have an opposite direction than the outer rings
        // XOR will invert the orientation for outer ring cases (Truth Table:, T/T = F, T/F = T, F/T = T, F/F = F)
        boolean direction = (component == 0 ^ orientation == Orientation.RIGHT);
        // set the points array accordingly (shell or hole)
        Coordinate[] points = (hole != null) ? hole.coordinates(false) : shell.coordinates(false);
        Edge.ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1);
        ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1, translated);
        return points.length-1;
    }

    /**
     * Create a connected list of a list of coordinates
     *
     * @param points
     *            array of point
     * @param offset
     *            index of the first point
     * @param length
     *            number of points
     * @return Array of edges
     */
    private static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
                               Coordinate[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) {
        // calculate the direction of the points:
        // find the point a the top of the set and check its
        // neighbors orientation. So direction is equivalent
        // to clockwise/counterclockwise
        final int top = top(points, offset, length);
        final int prev = (offset + ((top + length - 1) % length));
        final int next = (offset + ((top + 1) % length));
        boolean orientation = points[offset + prev].x > points[offset + next].x;

        // OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
        // since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
        // thus if orientation is computed as cw, the logic will translate points across dateline
        // and convert to a right handed system

        // compute the bounding box and calculate range
        double[] range = range(points, offset, length);
        final double rng = range[1] - range[0];
        // translate the points if the following is true
        //   1.  shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
        //       (translation would result in a collapsed poly)
        //   2.  the shell of the candidate hole has been translated (to preserve the coordinate system)
        boolean incorrectOrientation = component == 0 && handedness != orientation;
        if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (translated.get() && component != 0)) {
            translate(points);
            // flip the translation bit if the shell is being translated
            if (component == 0) {
                translated.set(true);
            }
            // correct the orientation post translation (ccw for shell, cw for holes)
            if (component == 0 || (component != 0 && handedness == orientation)) {
                orientation = !orientation;
            }
        }
        return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
    }

    private static final int top(Coordinate[] points, int offset, int length) {
        int top = 0; // we start at 1 here since top points to 0
        for (int i = 1; i < length; i++) {
            if (points[offset + i].y < points[offset + top].y) {
                top = i;
            } else if (points[offset + i].y == points[offset + top].y) {
                if (points[offset + i].x < points[offset + top].x) {
                    top = i;
                }
            }
        }
        return top;
    }

    private static final double[] range(Coordinate[] points, int offset, int length) {
        double minX = points[0].x;
        double maxX = points[0].x;
        double minY = points[0].y;
        double maxY = points[0].y;
        // compute the bounding coordinates (@todo: cleanup brute force)
        for (int i = 1; i < length; ++i) {
            if (points[offset + i].x < minX) {
                minX = points[offset + i].x;
            }
            if (points[offset + i].x > maxX) {
                maxX = points[offset + i].x;
            }
            if (points[offset + i].y < minY) {
                minY = points[offset + i].y;
            }
            if (points[offset + i].y > maxY) {
                maxY = points[offset + i].y;
            }
        }
        return new double[] {minX, maxX, minY, maxY};
    }

    /**
     * Concatenate a set of points to a polygon
     *
     * @param component
     *            component id of the polygon
     * @param direction
     *            direction of the ring
     * @param points
     *            list of points to concatenate
     * @param pointOffset
     *            index of the first point
     * @param edges
     *            Array of edges to write the result to
     * @param edgeOffset
     *            index of the first edge in the result
     * @param length
     *            number of points to use
     * @return the edges creates
     */
    private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
            int length) {
        assert edges.length >= length+edgeOffset;
        assert points.length >= length+pointOffset;
        edges[edgeOffset] = new Edge(points[pointOffset], null);
        for (int i = 1; i < length; i++) {
            if (direction) {
                edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
                edges[edgeOffset + i].component = component;
            } else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) {
                edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
                edges[edgeOffset + i - 1].component = component;
            } else {
                throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]);
            }
        }

        if (direction) {
            edges[edgeOffset].setNext(edges[edgeOffset + length - 1]);
            edges[edgeOffset].component = component;
        } else {
            edges[edgeOffset + length - 1].setNext(edges[edgeOffset]);
            edges[edgeOffset + length - 1].component = component;
        }

        return edges;
    }

    /**
     * Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range
     */
    private static void translate(Coordinate[] points) {
        for (Coordinate c : points) {
            if (c.x < 0) {
                c.x += 2*DATELINE;
            }
        }
    }
}
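The ring(...) method above only shifts a ring across the dateline when the computed winding disagrees with the requested handedness and the ring's longitude range exceeds a hemisphere without spanning the full 360 degrees. The standalone sketch below isolates just that translation step; DATELINE is assumed to be 180 and the double[][] representation of points is invented for the example.

----
// Illustrative sketch of the dateline translation used above; not the Elasticsearch classes.
public final class DatelineTranslation {

    private static final double DATELINE = 180.0; // assumed, matches the "(180 degrees)" comment

    // Shift negative longitudes (-180..0) into the 180..360 range.
    static void translate(double[][] points) {
        for (double[] p : points) {
            if (p[0] < 0) {
                p[0] += 2 * DATELINE;
            }
        }
    }

    // Longitude extent of the ring; translation is only considered when this exceeds a hemisphere.
    static double longitudeRange(double[][] points) {
        double min = points[0][0], max = points[0][0];
        for (double[] p : points) {
            min = Math.min(min, p[0]);
            max = Math.max(max, p[0]);
        }
        return max - min;
    }

    public static void main(String[] args) {
        // A ring that crosses the dateline: longitudes jump from +170 to -170.
        double[][] ring = { {170, 0}, {-170, 0}, {-170, 10}, {170, 10} };
        System.out.println(longitudeRange(ring)); // 340.0, wider than a hemisphere
        translate(ring);
        System.out.println(ring[1][0]);           // -170.0 becomes 190.0
    }
}
----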
@@ -26,8 +26,12 @@ import com.spatial4j.core.shape.jts.JtsGeometry;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.unit.DistanceUnit.Distance;
@@ -43,7 +47,7 @@ import java.util.*;
/**
 * Basic class for building GeoJSON shapes like Polygons, Linestrings, etc
 */
public abstract class ShapeBuilder extends ToXContentToBytes {
public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {

    protected static final ESLogger LOGGER = ESLoggerFactory.getLogger(ShapeBuilder.class.getName());

@@ -173,6 +177,15 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
        return builder.startArray().value(coordinate.x).value(coordinate.y).endArray();
    }

    protected static void writeCoordinateTo(Coordinate coordinate, StreamOutput out) throws IOException {
        out.writeDouble(coordinate.x);
        out.writeDouble(coordinate.y);
    }

    protected Coordinate readCoordinateFrom(StreamInput in) throws IOException {
        return new Coordinate(in.readDouble(), in.readDouble());
    }

    public static Orientation orientationFromString(String orientation) {
        orientation = orientation.toLowerCase(Locale.ROOT);
        switch (orientation) {
@@ -349,150 +362,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
        }
    }

    private static final int top(Coordinate[] points, int offset, int length) {
        int top = 0; // we start at 1 here since top points to 0
        for (int i = 1; i < length; i++) {
            if (points[offset + i].y < points[offset + top].y) {
                top = i;
            } else if (points[offset + i].y == points[offset + top].y) {
                if (points[offset + i].x < points[offset + top].x) {
                    top = i;
                }
            }
        }
        return top;
    }

    private static final double[] range(Coordinate[] points, int offset, int length) {
        double minX = points[0].x;
        double maxX = points[0].x;
        double minY = points[0].y;
        double maxY = points[0].y;
        // compute the bounding coordinates (@todo: cleanup brute force)
        for (int i = 1; i < length; ++i) {
            if (points[offset + i].x < minX) {
                minX = points[offset + i].x;
            }
            if (points[offset + i].x > maxX) {
                maxX = points[offset + i].x;
            }
            if (points[offset + i].y < minY) {
                minY = points[offset + i].y;
            }
            if (points[offset + i].y > maxY) {
                maxY = points[offset + i].y;
            }
        }
        return new double[] {minX, maxX, minY, maxY};
    }

    /**
     * Concatenate a set of points to a polygon
     *
     * @param component
     *            component id of the polygon
     * @param direction
     *            direction of the ring
     * @param points
     *            list of points to concatenate
     * @param pointOffset
     *            index of the first point
     * @param edges
     *            Array of edges to write the result to
     * @param edgeOffset
     *            index of the first edge in the result
     * @param length
     *            number of points to use
     * @return the edges creates
     */
    private static Edge[] concat(int component, boolean direction, Coordinate[] points, final int pointOffset, Edge[] edges, final int edgeOffset,
            int length) {
        assert edges.length >= length+edgeOffset;
        assert points.length >= length+pointOffset;
        edges[edgeOffset] = new Edge(points[pointOffset], null);
        for (int i = 1; i < length; i++) {
            if (direction) {
                edges[edgeOffset + i] = new Edge(points[pointOffset + i], edges[edgeOffset + i - 1]);
                edges[edgeOffset + i].component = component;
            } else if(!edges[edgeOffset + i - 1].coordinate.equals(points[pointOffset + i])) {
                edges[edgeOffset + i - 1].next = edges[edgeOffset + i] = new Edge(points[pointOffset + i], null);
                edges[edgeOffset + i - 1].component = component;
            } else {
                throw new InvalidShapeException("Provided shape has duplicate consecutive coordinates at: " + points[pointOffset + i]);
            }
        }

        if (direction) {
            edges[edgeOffset].setNext(edges[edgeOffset + length - 1]);
            edges[edgeOffset].component = component;
        } else {
            edges[edgeOffset + length - 1].setNext(edges[edgeOffset]);
            edges[edgeOffset + length - 1].component = component;
        }

        return edges;
    }

    /**
     * Create a connected list of a list of coordinates
     *
     * @param points
     *            array of point
     * @param offset
     *            index of the first point
     * @param length
     *            number of points
     * @return Array of edges
     */
    protected static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
                                 Coordinate[] points, int offset, Edge[] edges, int toffset, int length) {
        // calculate the direction of the points:
        // find the point a the top of the set and check its
        // neighbors orientation. So direction is equivalent
        // to clockwise/counterclockwise
        final int top = top(points, offset, length);
        final int prev = (offset + ((top + length - 1) % length));
        final int next = (offset + ((top + 1) % length));
        boolean orientation = points[offset + prev].x > points[offset + next].x;

        // OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
        // since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards
        // thus if orientation is computed as cw, the logic will translate points across dateline
        // and convert to a right handed system

        // compute the bounding box and calculate range
        double[] range = range(points, offset, length);
        final double rng = range[1] - range[0];
        // translate the points if the following is true
        //   1.  shell orientation is cw and range is greater than a hemisphere (180 degrees) but not spanning 2 hemispheres
        //       (translation would result in a collapsed poly)
        //   2.  the shell of the candidate hole has been translated (to preserve the coordinate system)
        boolean incorrectOrientation = component == 0 && handedness != orientation;
        if ( (incorrectOrientation && (rng > DATELINE && rng != 2*DATELINE)) || (shell.translated && component != 0)) {
            translate(points);
            // flip the translation bit if the shell is being translated
            if (component == 0) {
                shell.translated = true;
            }
            // correct the orientation post translation (ccw for shell, cw for holes)
            if (component == 0 || (component != 0 && handedness == orientation)) {
                orientation = !orientation;
            }
        }
        return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
    }

    /**
     * Transforms coordinates in the eastern hemisphere (-180:0) to a (180:360) range
     */
    protected static void translate(Coordinate[] points) {
        for (Coordinate c : points) {
            if (c.x < 0) {
                c.x += 2*DATELINE;
            }
        }
    }

    /**
     * Set the intersection of this line segment to the given position
     *
@@ -504,7 +373,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
            return intersect = position(coordinate, next.coordinate, position);
        }

        public static Coordinate position(Coordinate p1, Coordinate p2, double position) {
        protected static Coordinate position(Coordinate p1, Coordinate p2, double position) {
            if (position == 0) {
                return p1;
            } else if (position == 1) {
@@ -529,7 +398,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
        public int compare(Edge o1, Edge o2) {
            return Double.compare(o1.intersect.y, o2.intersect.y);
        }

    }

    public static enum Orientation {
@@ -565,12 +433,16 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
        ENVELOPE("envelope"),
        CIRCLE("circle");

        protected final String shapename;
        private final String shapename;

        private GeoShapeType(String shapename) {
            this.shapename = shapename;
        }

        protected String shapeName() {
            return shapename;
        }

        public static GeoShapeType forName(String geoshapename) {
            String typename = geoshapename.toLowerCase(Locale.ROOT);
            for (GeoShapeType type : values()) {
@@ -823,4 +695,20 @@ public abstract class ShapeBuilder extends ToXContentToBytes {
            return geometryCollection;
        }
    }

    @Override
    public String getWriteableName() {
        return type().shapeName();
    }

    // NORELEASE this should be deleted as soon as all shape builders implement writable
    @Override
    public void writeTo(StreamOutput out) throws IOException {
    }

    // NORELEASE this should be deleted as soon as all shape builders implement writable
    @Override
    public ShapeBuilder readFrom(StreamInput in) throws IOException {
        return null;
    }
}
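With ShapeBuilder now implementing NamedWriteable, getWriteableName() keys each builder by its GeoShapeType name so a reader can pick the right prototype for deserialization. The sketch below shows that name-to-prototype dispatch in plain Java; the registry, interface, and shape classes are invented for the example and are not the Elasticsearch NamedWriteable machinery.

----
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

// Illustrative sketch of name-based dispatch; not the Elasticsearch registry.
public final class NamedPrototypeRegistry {

    interface NamedShape {
        String getWriteableName();
    }

    static final class Circle implements NamedShape {
        @Override public String getWriteableName() { return "circle"; }
    }

    static final class Point implements NamedShape {
        @Override public String getWriteableName() { return "point"; }
    }

    private final Map<String, Supplier<? extends NamedShape>> prototypes = new HashMap<>();

    void register(String name, Supplier<? extends NamedShape> prototype) {
        prototypes.put(name, prototype);
    }

    // The name read from the wire selects which prototype handles the payload.
    NamedShape create(String name) {
        Supplier<? extends NamedShape> prototype = prototypes.get(name);
        if (prototype == null) {
            throw new IllegalArgumentException("unknown shape [" + name + "]");
        }
        return prototype.get();
    }

    public static void main(String[] args) {
        NamedPrototypeRegistry registry = new NamedPrototypeRegistry();
        registry.register("circle", Circle::new);
        registry.register("point", Point::new);
        System.out.println(registry.create("circle").getWriteableName()); // circle
    }
}
----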
@@ -331,6 +331,6 @@ public abstract class Multibinder<T> {

            NullPointerException npe = new NullPointerException(name);
            throw new ConfigurationException(singleton(
                    new Message(emptyList(), npe.toString(), npe)));
                    new Message(emptyList(), npe)));
        }
    }
@@ -58,6 +58,10 @@ public final class Message implements Serializable, Element {
        this(Collections.singletonList(source), message, null);
    }

    public Message(Object source, Throwable cause) {
        this(Collections.singletonList(source), null, cause);
    }

    public Message(String message) {
        this(Collections.emptyList(), message, null);
    }
@@ -19,8 +19,6 @@

package org.elasticsearch.common.lease;

import org.elasticsearch.ElasticsearchException;

import java.util.Arrays;

/** Utility methods to work with {@link Releasable}s. */
@@ -26,8 +26,14 @@ import org.elasticsearch.common.inject.AbstractModule;
 */
public class NetworkModule extends AbstractModule {

    private final NetworkService networkService;

    public NetworkModule(NetworkService networkService) {
        this.networkService = networkService;
    }

    @Override
    protected void configure() {
        bind(NetworkService.class).asEagerSingleton();
        bind(NetworkService.class).toInstance(networkService);
    }
}
@@ -20,7 +20,6 @@
package org.elasticsearch.common.network;

import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
@@ -80,7 +79,6 @@ public class NetworkService extends AbstractComponent {

    private final List<CustomNameResolver> customNameResolvers = new CopyOnWriteArrayList<>();

    @Inject
    public NetworkService(Settings settings) {
        super(settings);
        IfConfig.logIfNecessary();
@@ -20,7 +20,6 @@ package org.elasticsearch.common.settings;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.rest.RestRequest;
@@ -35,7 +34,7 @@ import java.util.concurrent.CopyOnWriteArrayList;
/**
 *
 */
public class SettingsFilter extends AbstractComponent {
public final class SettingsFilter extends AbstractComponent {
    /**
     * Can be used to specify settings filter that will be used to filter out matching settings in toXContent method
     */
@@ -43,7 +42,6 @@ public class SettingsFilter extends AbstractComponent {

    private final CopyOnWriteArrayList<String> patterns = new CopyOnWriteArrayList<>();

    @Inject
    public SettingsFilter(Settings settings) {
        super(settings);
    }
@@ -29,14 +29,16 @@ import org.elasticsearch.common.inject.AbstractModule;
public class SettingsModule extends AbstractModule {

    private final Settings settings;
    private final SettingsFilter settingsFilter;

    public SettingsModule(Settings settings) {
    public SettingsModule(Settings settings, SettingsFilter settingsFilter) {
        this.settings = settings;
        this.settingsFilter = settingsFilter;
    }

    @Override
    protected void configure() {
        bind(Settings.class).toInstance(settings);
        bind(SettingsFilter.class).asEagerSingleton();
        bind(SettingsFilter.class).toInstance(settingsFilter);
    }
}
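NetworkModule and SettingsModule both switch from asEagerSingleton plus an @Inject constructor to binding an instance the caller constructs up front with toInstance. The sketch below shows that difference with the public Guice API; Elasticsearch uses its own org.elasticsearch.common.inject fork, and the GreetingService class here is invented for the example.

----
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;

// Illustrative sketch of binding a pre-built instance instead of an injector-constructed singleton.
public final class InstanceBindingSketch {

    static final class GreetingService {
        private final String greeting;
        GreetingService(String greeting) { this.greeting = greeting; }
        String greet(String name) { return greeting + ", " + name; }
    }

    // The instance is created by the caller and handed to the module, as the commit now
    // does with NetworkService and SettingsFilter.
    static final class GreetingModule extends AbstractModule {
        private final GreetingService service;
        GreetingModule(GreetingService service) { this.service = service; }

        @Override
        protected void configure() {
            bind(GreetingService.class).toInstance(service);
        }
    }

    public static void main(String[] args) {
        GreetingService prebuilt = new GreetingService("Hello");
        Injector injector = Guice.createInjector(new GreetingModule(prebuilt));
        // Every lookup returns the exact instance that was bound.
        System.out.println(injector.getInstance(GreetingService.class) == prebuilt);   // true
        System.out.println(injector.getInstance(GreetingService.class).greet("node")); // Hello, node
    }
}
----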
Some files were not shown because too many files have changed in this diff.