merge master

commit 3744fb9dc0
@@ -1,37 +1,42 @@
# intellij files
.idea/
.gradle/
*.iml
*.ipr
*.iws
work/
/data/
logs/
.DS_Store
build/
generated-resources/
**/.local*
docs/html/
docs/build.log
/tmp/
backwards/
html_docs
.vagrant/

## eclipse ignores (use 'mvn eclipse:eclipse' to build eclipse projects)
## All files (.project, .classpath, .settings/*) should be generated through Maven which
## will correctly set the classpath based on the declared dependencies and write settings
## files to ensure common coding style across Eclipse and IDEA.
# eclipse files
.project
.classpath
eclipse-build
.settings

## netbeans ignores
# netbeans files
nb-configuration.xml
nbactions.xml

dependency-reduced-pom.xml
# gradle stuff
.gradle/
build/
generated-resources/

# old patterns specific to maven
# maven stuff (to be removed when trunk becomes 4.x)
*-execution-hints.log
target/
dependency-reduced-pom.xml

# testing stuff
**/.local*
.vagrant/

# osx stuff
.DS_Store

# needed in case docs build is run...maybe we can configure doc build to generate files under build?
html_docs

# random old stuff that we should look at the necessity of...
/tmp/
backwards/
@@ -84,7 +84,9 @@ Please follow these formatting guidelines:
* Line width is 140 characters
* The rest is left to Java coding standards
* Disable “auto-format on save”: it generates unnecessary formatting changes that make reviews much harder. If your IDE supports formatting only modified chunks, that is fine to do.
* Don't worry too much about imports. Try not to change the order, but don't worry about fighting your IDE to stop it from switching from * imports to specific imports or from specific to * imports.
* Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail (see the example below). Please attempt to tame your IDE so it doesn't make them, and please send a PR against this document with instructions for your IDE if it doesn't contain them.
  * Eclipse: Preferences->Java->Code Style->Organize Imports. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
* Don't worry too much about import order. Try not to change it, but don't worry about fighting your IDE to stop it from doing so.
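For illustration, the difference the rule draws (using `java.util` purely as a hypothetical example):

---------------------------------------------------------------------------
import java.util.*;     // wildcard import: forbidden, fails the build

import java.util.List;  // specific imports: always fine
import java.util.Map;
---------------------------------------------------------------------------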

To create a distribution from the source, simply run:
@@ -290,14 +290,14 @@ The REST tests are run automatically when executing the "gradle check" command.
REST tests use the following command:

---------------------------------------------------------------------------
gradle :distribution:tar:integTest \
gradle :distribution:integ-test-zip:integTest \
  -Dtests.class=org.elasticsearch.test.rest.RestIT
---------------------------------------------------------------------------

A specific test case can be run with

---------------------------------------------------------------------------
gradle :distribution:tar:integTest \
gradle :distribution:integ-test-zip:integTest \
  -Dtests.class=org.elasticsearch.test.rest.RestIT \
  -Dtests.method="test {p0=cat.shards/10_basic/Help}"
---------------------------------------------------------------------------
build.gradle
@@ -109,7 +109,7 @@ subprojects {
  ext.projectSubstitutions = [
    "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
    "org.elasticsearch:elasticsearch:${version}": ':core',
    "org.elasticsearch:test-framework:${version}": ':test-framework',
    "org.elasticsearch.test:framework:${version}": ':test:framework',
    "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
    "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
    "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',

@@ -131,8 +131,8 @@ subprojects {
  // the dependency is added.
  gradle.projectsEvaluated {
    allprojects {
      if (project.path == ':test-framework') {
        // :test-framework:test cannot run before and after :core:test
      if (project.path == ':test:framework') {
        // :test:framework:test cannot run before and after :core:test
        return
      }
      configurations.all {
@@ -169,6 +169,30 @@ gradle.projectsEvaluated {
// intellij configuration
allprojects {
  apply plugin: 'idea'

  idea {
    module {
      // same as for the IntelliJ Gradle tooling integration
      inheritOutputDirs = false
      outputDir = file('build/classes/main')
      testOutputDir = file('build/classes/test')

      iml {
        // fix so that Gradle idea plugin properly generates support for resource folders
        // see also https://issues.gradle.org/browse/GRADLE-2975
        withXml {
          it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/main/resources' }.each {
            it.attributes().remove('isTestSource')
            it.attributes().put('type', 'java-resource')
          }
          it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/test/resources' }.each {
            it.attributes().remove('isTestSource')
            it.attributes().put('type', 'java-test-resource')
          }
        }
      }
    }
  }
}

idea {
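In effect, the withXml hook rewrites the generated .iml source folder entries: an entry such as <sourceFolder url="file://$MODULE_DIR$/src/main/resources" isTestSource="false"/> (a hypothetical fragment, for illustration) loses its isTestSource attribute and gains type="java-resource", so IntelliJ treats the directory as a resource root rather than a plain (or test) source root.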
@@ -63,6 +63,7 @@ dependencies {
  compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
  compile 'de.thetaphi:forbiddenapis:2.0'
  compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
  compile 'org.apache.rat:apache-rat:0.11'
}

processResources {
@@ -2,7 +2,6 @@ package com.carrotsearch.gradle.junit4

import com.carrotsearch.ant.tasks.junit4.ListenersList
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import com.esotericsoftware.kryo.serializers.FieldSerializer
import groovy.xml.NamespaceBuilder
import groovy.xml.NamespaceBuilderSupport
import org.apache.tools.ant.BuildException

@@ -14,7 +13,10 @@ import org.gradle.api.file.FileCollection
import org.gradle.api.file.FileTreeElement
import org.gradle.api.internal.tasks.options.Option
import org.gradle.api.specs.Spec
import org.gradle.api.tasks.*
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputDirectory
import org.gradle.api.tasks.Optional
import org.gradle.api.tasks.TaskAction
import org.gradle.api.tasks.util.PatternFilterable
import org.gradle.api.tasks.util.PatternSet
import org.gradle.logging.ProgressLoggerFactory
@@ -27,10 +27,13 @@ import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultE
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import org.gradle.logging.ProgressLogger
import org.gradle.logging.ProgressLoggerFactory
import org.junit.runner.Description

import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.*
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.ERROR
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.FAILURE
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.IGNORED_ASSUMPTION
import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.OK
import static java.lang.Math.max

/**
@@ -5,8 +5,21 @@ import com.carrotsearch.ant.tasks.junit4.Pluralize
import com.carrotsearch.ant.tasks.junit4.TestsSummaryEventListener
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.base.Strings
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe
import com.carrotsearch.ant.tasks.junit4.events.*
import com.carrotsearch.ant.tasks.junit4.events.aggregated.*
import com.carrotsearch.ant.tasks.junit4.events.EventType
import com.carrotsearch.ant.tasks.junit4.events.IEvent
import com.carrotsearch.ant.tasks.junit4.events.IStreamEvent
import com.carrotsearch.ant.tasks.junit4.events.SuiteStartedEvent
import com.carrotsearch.ant.tasks.junit4.events.TestFinishedEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedQuitEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteStartedEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.ChildBootstrap
import com.carrotsearch.ant.tasks.junit4.events.aggregated.HeartBeatEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.PartialOutputEvent
import com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus
import com.carrotsearch.ant.tasks.junit4.events.mirrors.FailureMirror
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import com.carrotsearch.ant.tasks.junit4.listeners.StackTraceFilter

@@ -15,16 +28,17 @@ import org.gradle.api.logging.LogLevel
import org.gradle.api.logging.Logger
import org.junit.runner.Description

import javax.sound.sampled.AudioSystem
import javax.sound.sampled.Clip
import javax.sound.sampled.Line
import javax.sound.sampled.LineEvent
import javax.sound.sampled.LineListener
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicInteger

import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Clip;
import javax.sound.sampled.Line;
import javax.sound.sampled.LineEvent;
import javax.sound.sampled.LineListener;

import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.*
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDescription
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatTime
import static com.carrotsearch.gradle.junit4.TestLoggingConfiguration.OutputMode

class TestReportLogger extends TestsSummaryEventListener implements AggregatedEventListener {
@@ -0,0 +1,101 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle

import org.apache.tools.ant.BuildListener
import org.apache.tools.ant.BuildLogger
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.Project
import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.TaskAction

import java.nio.charset.Charset

/**
 * A task which will run ant commands.
 *
 * Logging for the task is customizable for subclasses by overriding makeLogger.
 */
public abstract class AntTask extends DefaultTask {

    /**
     * A buffer that will contain the output of the ant code run,
     * if the output was not already written directly to stdout.
     */
    public final ByteArrayOutputStream outputBuffer = new ByteArrayOutputStream()

    @TaskAction
    final void executeTask() {
        AntBuilder ant = new AntBuilder()

        // remove existing loggers, we add our own
        List<BuildLogger> toRemove = new ArrayList<>();
        for (BuildListener listener : ant.project.getBuildListeners()) {
            if (listener instanceof BuildLogger) {
                toRemove.add(listener);
            }
        }
        for (BuildLogger listener : toRemove) {
            ant.project.removeBuildListener(listener)
        }

        // otherwise groovy replaces System.out, and you have no chance to debug
        // ant.saveStreams = false

        final int outputLevel = logger.isDebugEnabled() ? Project.MSG_DEBUG : Project.MSG_INFO
        final PrintStream stream = useStdout() ? System.out : new PrintStream(outputBuffer, true, Charset.defaultCharset().name())
        BuildLogger antLogger = makeLogger(stream, outputLevel)

        ant.project.addBuildListener(antLogger)
        try {
            runAnt(ant)
        } catch (Exception e) {
            // ant failed, so see if we have buffered output to emit, then rethrow the failure
            String buffer = outputBuffer.toString()
            if (buffer.isEmpty() == false) {
                logger.error("=== Ant output ===\n${buffer}")
            }
            throw e
        }
    }

    /** Runs the doAnt closure. This can be overridden by subclasses instead of having to set a closure. */
    protected abstract void runAnt(AntBuilder ant)

    /** Create the logger the ant runner will use, with the given stream for error/output. */
    protected BuildLogger makeLogger(PrintStream stream, int outputLevel) {
        return new DefaultLogger(
            errorPrintStream: stream,
            outputPrintStream: stream,
            messageOutputLevel: outputLevel)
    }

    /**
     * Returns true if the ant logger should write to stdout, or false if to the buffer.
     * The default implementation writes to the buffer when gradle info logging is disabled.
     */
    protected boolean useStdout() {
        return logger.isInfoEnabled()
    }
}
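A subclass only has to implement runAnt. A minimal hypothetical sketch (EchoTask is invented for illustration, not part of this commit):

---------------------------------------------------------------------------
class EchoTask extends AntTask {
    @Override
    protected void runAnt(AntBuilder ant) {
        // any ant task is reachable through the AntBuilder passed in;
        // output goes to stdout, or to outputBuffer when info logging is off
        ant.echo(message: 'hello from ant')
    }
}
---------------------------------------------------------------------------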
@@ -18,22 +18,30 @@
 */
package org.elasticsearch.gradle

import org.gradle.process.ExecResult

import java.time.ZonedDateTime
import java.time.ZoneOffset

import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.*
import org.gradle.api.artifacts.*
import org.gradle.api.GradleException
import org.gradle.api.JavaVersion
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.XmlProvider
import org.gradle.api.artifacts.Configuration
import org.gradle.api.artifacts.ModuleDependency
import org.gradle.api.artifacts.ModuleVersionIdentifier
import org.gradle.api.artifacts.ProjectDependency
import org.gradle.api.artifacts.ResolvedArtifact
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.artifacts.maven.MavenPom
import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.JavaCompile
import org.gradle.internal.jvm.Jvm
import org.gradle.process.ExecResult
import org.gradle.util.GradleVersion

import java.time.ZoneOffset
import java.time.ZonedDateTime

/**
 * Encapsulates build configuration for elasticsearch projects.
 */

@@ -190,6 +198,10 @@ class BuildPlugin implements Plugin<Project> {
     * to iterate the transitive dependencies and add excludes.
     */
    static void configureConfigurations(Project project) {
        // we are not shipping these jars, we act like dumb consumers of these things
        if (project.path.startsWith(':test:fixtures')) {
            return
        }
        // fail on any conflicting dependency versions
        project.configurations.all({ Configuration configuration ->
            if (configuration.name.startsWith('_transitive_')) {

@@ -197,12 +209,16 @@ class BuildPlugin implements Plugin<Project> {
                // we just have them to find *what* transitive deps exist
                return
            }
            if (configuration.name.endsWith('Fixture')) {
                // just a self contained test-fixture configuration, likely transitive and hellacious
                return
            }
            configuration.resolutionStrategy.failOnVersionConflict()
        })

        // force all dependencies added directly to compile/testCompile to be non-transitive, except for ES itself
        Closure disableTransitiveDeps = { ModuleDependency dep ->
            if (!(dep instanceof ProjectDependency) && dep.getGroup() != 'org.elasticsearch') {
            if (!(dep instanceof ProjectDependency) && dep.group.startsWith('org.elasticsearch') == false) {
                dep.transitive = false

                // also create a configuration just for this dependency version, so that later

@@ -302,6 +318,7 @@ class BuildPlugin implements Plugin<Project> {
                options.compilerArgs << '-profile' << project.compactProfile
            }
            options.encoding = 'UTF-8'
            //options.incremental = true
        }
    }
}
@@ -19,9 +19,10 @@
package org.elasticsearch.gradle

import org.gradle.api.DefaultTask
import org.gradle.api.tasks.*
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.TaskAction
import org.gradle.internal.nativeintegration.filesystem.Chmod
import java.io.File

import javax.inject.Inject

/**
@@ -19,8 +19,9 @@
package org.elasticsearch.gradle

import org.gradle.api.DefaultTask
import org.gradle.api.tasks.*
import java.io.File
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.OutputFile
import org.gradle.api.tasks.TaskAction

/**
 * Creates a file and sets its contents to something.
@@ -22,7 +22,7 @@ import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.artifacts.Dependency
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip

@@ -60,7 +60,7 @@ public class PluginBuildPlugin extends BuildPlugin {
    private static void configureDependencies(Project project) {
        project.dependencies {
            provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
            testCompile "org.elasticsearch:test-framework:${project.versions.elasticsearch}"
            testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
            // we "upgrade" these optional deps to provided for plugins, since they will run
            // with a full elasticsearch server that includes optional deps
            provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"

@@ -101,6 +101,11 @@ public class PluginBuildPlugin extends BuildPlugin {
            from pluginMetadata // metadata (eg custom security policy)
            from project.jar // this plugin's jar
            from project.configurations.runtime - project.configurations.provided // the dep jars
            // hack just for slf4j, in case it is "upgraded" from provided to compile,
            // since it is not actually provided in distributions
            from project.configurations.runtime.fileCollection { Dependency dep ->
                return dep.name == 'slf4j-api' && project.configurations.compile.dependencies.contains(dep)
            }
            // extra files for the plugin to go into the zip
            from('src/main/packaging') // TODO: move all config/bin/_size/etc into packaging
            from('src/main') {
@@ -20,7 +20,6 @@ package org.elasticsearch.gradle.plugin

import org.gradle.api.Project
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.Optional

/**
 * A container for plugin properties that will be written to the plugin descriptor, for easy
@@ -18,7 +18,9 @@
 */
package org.elasticsearch.gradle.precommit

import org.gradle.api.*
import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputDirectory

@@ -61,6 +61,7 @@ public class ForbiddenPatternsTask extends DefaultTask {
        // add mandatory rules
        patterns.put('nocommit', /nocommit/)
        patterns.put('tab', /\t/)
        patterns.put('wildcard imports', /^\s*import.*\.\*/)

        inputs.property("excludes", filesFilter.excludes)
        inputs.property("rules", patterns)
@@ -0,0 +1,122 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.gradle.precommit

import org.apache.rat.anttasks.Report
import org.apache.rat.anttasks.SubstringLicenseMatcher
import org.apache.rat.license.SimpleLicenseFamily
import org.elasticsearch.gradle.AntTask
import org.gradle.api.tasks.SourceSet

import java.nio.file.Files

/**
 * Checks files for license headers.
 * <p>
 * This is a port of the apache lucene check
 */
public class LicenseHeadersTask extends AntTask {

    LicenseHeadersTask() {
        description = "Checks sources for missing, incorrect, or unacceptable license headers"
    }

    @Override
    protected void runAnt(AntBuilder ant) {
        ant.project.addTaskDefinition('ratReport', Report)
        ant.project.addDataTypeDefinition('substringMatcher', SubstringLicenseMatcher)
        ant.project.addDataTypeDefinition('approvedLicense', SimpleLicenseFamily)

        // create a file for the log to go to under reports/
        File reportDir = new File(project.buildDir, "reports/licenseHeaders")
        reportDir.mkdirs()
        File reportFile = new File(reportDir, "rat.log")
        Files.deleteIfExists(reportFile.toPath())

        // run rat, going to the file
        ant.ratReport(reportFile: reportFile.absolutePath, addDefaultLicenseMatchers: true) {
            // checks all the java sources (allJava)
            for (SourceSet set : project.sourceSets) {
                for (File dir : set.allJava.srcDirs) {
                    // sometimes these dirs don't exist, e.g. site-plugin has no actual java src/main...
                    if (dir.exists()) {
                        ant.fileset(dir: dir)
                    }
                }
            }

            // BSD 4-clause stuff (is disallowed below)
            // we keep this here, in case someone adds BSD code for some reason, it should never be allowed.
            substringMatcher(licenseFamilyCategory: "BSD4 ",
                             licenseFamilyName: "Original BSD License (with advertising clause)") {
                pattern(substring: "All advertising materials")
            }

            // Apache
            substringMatcher(licenseFamilyCategory: "AL ",
                             licenseFamilyName: "Apache") {
                // Apache license (ES)
                pattern(substring: "Licensed to Elasticsearch under one or more contributor")
                // Apache license (ASF)
                pattern(substring: "Licensed to the Apache Software Foundation (ASF) under")
                // this is the old-school one under some files
                pattern(substring: "Licensed under the Apache License, Version 2.0 (the \"License\")")
            }

            // Generated resources
            substringMatcher(licenseFamilyCategory: "GEN ",
                             licenseFamilyName: "Generated") {
                // parsers generated by antlr
                pattern(substring: "ANTLR GENERATED CODE")
            }

            // approved categories
            approvedLicense(familyName: "Apache")
            approvedLicense(familyName: "Generated")
        }

        // check the license file for any errors, this should be fast.
        boolean zeroUnknownLicenses = false
        boolean foundProblemsWithFiles = false
        reportFile.eachLine('UTF-8') { line ->
            if (line.startsWith("0 Unknown Licenses")) {
                zeroUnknownLicenses = true
            }

            if (line.startsWith(" !")) {
                foundProblemsWithFiles = true
            }
        }

        if (zeroUnknownLicenses == false || foundProblemsWithFiles) {
            // print the unapproved license section, usually its all you need to fix problems.
            int sectionNumber = 0
            reportFile.eachLine('UTF-8') { line ->
                if (line.startsWith("*******************************")) {
                    sectionNumber++
                } else {
                    if (sectionNumber == 2) {
                        logger.error(line)
                    }
                }
            }
            throw new IllegalStateException("License header problems were found! Full details: " + reportFile.absolutePath)
        }
    }
}
@@ -34,7 +34,9 @@ class PrecommitTasks {
        List<Task> precommitTasks = [
            configureForbiddenApis(project),
            project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
            project.tasks.create('jarHell', JarHellTask.class)]
            project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
            project.tasks.create('jarHell', JarHellTask.class),
            project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)]

        // tasks with just tests don't need dependency licenses, so this flag makes adding
        // the task optional
@@ -0,0 +1,258 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.gradle.precommit;

import org.apache.tools.ant.BuildEvent;
import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.BuildListener;
import org.apache.tools.ant.BuildLogger;
import org.apache.tools.ant.DefaultLogger;
import org.apache.tools.ant.Project;
import org.elasticsearch.gradle.AntTask;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.file.FileCollection;

import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Basic static checking to keep tabs on third party JARs
 */
public class ThirdPartyAuditTask extends AntTask {

    // patterns for classes to exclude, because we understand their issues
    private String[] excludes = new String[0];

    ThirdPartyAuditTask() {
        // we depend on this because its the only reliable configuration
        // this probably makes the build slower: gradle you suck here when it comes to configurations, you pay the price.
        dependsOn(project.configurations.testCompile);
        description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors'";
    }

    /**
     * classes that should be excluded from the scan,
     * e.g. because we know what sheisty stuff those particular classes are up to.
     */
    public void setExcludes(String[] classes) {
        for (String s : classes) {
            if (s.indexOf('*') != -1) {
                throw new IllegalArgumentException("illegal third party audit exclusion: '" + s + "', wildcards are not permitted!");
            }
        }
        excludes = classes;
    }

    /**
     * Returns current list of exclusions.
     */
    public String[] getExcludes() {
        return excludes;
    }

    // yes, we parse Uwe Schindler's errors to find missing classes, and to keep a continuous audit. Just don't let him know!
    static final Pattern MISSING_CLASS_PATTERN =
        Pattern.compile(/WARNING: The referenced class '(.*)' cannot be loaded\. Please fix the classpath\!/);

    static final Pattern VIOLATION_PATTERN =
        Pattern.compile(/\s\sin ([a-zA-Z0-9\$\.]+) \(.*\)/);

    // we log everything and capture errors and handle them with our whitelist
    // this is important, as we detect stale whitelist entries, workaround forbidden apis bugs,
    // and it also allows whitelisting missing classes!
    static class EvilLogger extends DefaultLogger {
        final Set<String> missingClasses = new TreeSet<>();
        final Map<String,List<String>> violations = new TreeMap<>();
        String previousLine = null;

        @Override
        public void messageLogged(BuildEvent event) {
            if (event.getTask().getClass() == de.thetaphi.forbiddenapis.ant.AntTask.class) {
                if (event.getPriority() == Project.MSG_WARN) {
                    Matcher m = MISSING_CLASS_PATTERN.matcher(event.getMessage());
                    if (m.matches()) {
                        missingClasses.add(m.group(1).replace('.', '/') + ".class");
                    }
                } else if (event.getPriority() == Project.MSG_ERR) {
                    Matcher m = VIOLATION_PATTERN.matcher(event.getMessage());
                    if (m.matches()) {
                        String violation = previousLine + '\n' + event.getMessage();
                        String clazz = m.group(1).replace('.', '/') + ".class";
                        List<String> current = violations.get(clazz);
                        if (current == null) {
                            current = new ArrayList<>();
                            violations.put(clazz, current);
                        }
                        current.add(violation);
                    }
                    previousLine = event.getMessage();
                }
            }
            super.messageLogged(event);
        }
    }

    @Override
    protected BuildLogger makeLogger(PrintStream stream, int outputLevel) {
        DefaultLogger log = new EvilLogger();
        log.errorPrintStream = stream;
        log.outputPrintStream = stream;
        log.messageOutputLevel = outputLevel;
        return log;
    }

    @Override
    protected void runAnt(AntBuilder ant) {
        Configuration configuration = project.configurations.findByName('runtime');
        if (configuration == null) {
            // some projects apparently do not have 'runtime'? what a nice inconsistency,
            // basically only serves to waste time in build logic!
            configuration = project.configurations.findByName('testCompile');
        }
        assert configuration != null;
        ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask);

        // we only want third party dependencies.
        FileCollection jars = configuration.fileCollection({ dependency ->
            dependency.group.startsWith("org.elasticsearch") == false
        });

        // we don't want provided dependencies, which we have already scanned. e.g. don't
        // scan ES core's dependencies for every single plugin
        Configuration provided = project.configurations.findByName('provided');
        if (provided != null) {
            jars -= provided;
        }

        // no dependencies matched, we are done
        if (jars.isEmpty()) {
            return;
        }

        // print which jars we are going to scan, always
        // this is not the time to try to be succinct! Forbidden will print plenty on its own!
        Set<String> names = new TreeSet<>();
        for (File jar : jars) {
            names.add(jar.getName());
        }

        // TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first,
        // and then remove our temp dir afterwards. don't complain: try it yourself.
        // we don't use gradle temp dir handling, just google it, or try it yourself.

        File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit');

        // clean up any previous mess (if we failed), then unzip everything to one directory
        ant.delete(dir: tmpDir.getAbsolutePath());
        tmpDir.mkdirs();
        for (File jar : jars) {
            ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath());
        }

        // convert exclusion class names to binary file names
        String[] excludedFiles = new String[excludes.length];
        for (int i = 0; i < excludes.length; i++) {
            excludedFiles[i] = excludes[i].replace('.', '/') + ".class";
        }
        Set<String> excludedSet = new TreeSet<>(Arrays.asList(excludedFiles));

        // jarHellReprise
        Set<String> sheistySet = getSheistyClasses(tmpDir.toPath());

        try {
            ant.thirdPartyAudit(internalRuntimeForbidden: false,
                failOnUnsupportedJava: false,
                failOnMissingClasses: false,
                signaturesFile: new File(getClass().getResource('/forbidden/third-party-audit.txt').toURI()),
                classpath: configuration.asPath) {
                fileset(dir: tmpDir)
            }
        } catch (BuildException ignore) {}

        EvilLogger evilLogger = null;
        for (BuildListener listener : ant.project.getBuildListeners()) {
            if (listener instanceof EvilLogger) {
                evilLogger = (EvilLogger) listener;
                break;
            }
        }
        assert evilLogger != null;

        // keep our whitelist up to date
        Set<String> bogusExclusions = new TreeSet<>(excludedSet);
        bogusExclusions.removeAll(sheistySet);
        bogusExclusions.removeAll(evilLogger.missingClasses);
        bogusExclusions.removeAll(evilLogger.violations.keySet());
        if (!bogusExclusions.isEmpty()) {
            throw new IllegalStateException("Invalid exclusions, nothing is wrong with these classes: " + bogusExclusions);
        }

        // don't duplicate classes with the JDK
        sheistySet.removeAll(excludedSet);
        if (!sheistySet.isEmpty()) {
            throw new IllegalStateException("JAR HELL WITH JDK! " + sheistySet);
        }

        // don't allow a broken classpath
        evilLogger.missingClasses.removeAll(excludedSet);
        if (!evilLogger.missingClasses.isEmpty()) {
            throw new IllegalStateException("CLASSES ARE MISSING! " + evilLogger.missingClasses);
        }

        // don't use internal classes
        evilLogger.violations.keySet().removeAll(excludedSet);
        if (!evilLogger.violations.isEmpty()) {
            throw new IllegalStateException("VIOLATIONS WERE FOUND! " + evilLogger.violations);
        }

        // clean up our mess (if we succeed)
        ant.delete(dir: tmpDir.getAbsolutePath());
    }

    /**
     * check for sheisty classes: if they also exist in the extensions classloader, its jar hell with the jdk!
     */
    private Set<String> getSheistyClasses(Path root) {
        // system.parent = extensions loader.
        // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!).
        // but groovy/gradle needs to work at all first!
        ClassLoader ext = ClassLoader.getSystemClassLoader().getParent();
        assert ext != null;

        Set<String> sheistySet = new TreeSet<>();
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                String entry = root.relativize(file).toString().replace('\\', '/');
                if (entry.endsWith(".class")) {
                    if (ext.getResource(entry) != null) {
                        sheistySet.add(entry);
                    }
                }
                return FileVisitResult.CONTINUE;
            }
        });
        return sheistySet;
    }
}
@@ -23,11 +23,18 @@ import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.gradle.api.*
import org.gradle.api.AntBuilder
import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import org.gradle.api.logging.Logger
import org.gradle.api.tasks.*
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec

import java.nio.file.Paths
@@ -0,0 +1,287 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle.test

import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.AntTask
import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.GradleException
import org.gradle.api.Task
import org.gradle.api.tasks.Exec
import org.gradle.api.tasks.Input

/**
 * A fixture for integration tests which runs in a separate process.
 */
public class Fixture extends AntTask {

    /** The path to the executable that starts the fixture. */
    @Input
    String executable

    private final List<Object> arguments = new ArrayList<>()

    @Input
    public void args(Object... args) {
        arguments.addAll(args)
    }

    /**
     * Environment variables for the fixture process. The value can be any object, which
     * will have toString() called at execution time.
     */
    private final Map<String, Object> environment = new HashMap<>()

    @Input
    public void env(String key, Object value) {
        environment.put(key, value)
    }

    /** A flag to indicate whether the command should be executed from a shell. */
    @Input
    boolean useShell = false

    /**
     * A flag to indicate whether the fixture should be run in the foreground, or spawned.
     * It is protected so subclasses can override (eg RunTask).
     */
    protected boolean spawn = true

    /**
     * A closure to call before the fixture is considered ready. The closure is passed the fixture object,
     * as well as a groovy AntBuilder, to enable running ant condition checks. The default wait
     * condition is for http on the http port.
     */
    @Input
    Closure waitCondition = { Fixture fixture, AntBuilder ant ->
        File tmpFile = new File(fixture.cwd, 'wait.success')
        ant.get(src: "http://${fixture.addressAndPort}",
                dest: tmpFile.toString(),
                ignoreerrors: true, // do not fail on error, so logging information can be flushed
                retries: 10)
        return tmpFile.exists()
    }

    /** A task which will stop this fixture. This should be used as a finalizedBy for any tasks that use the fixture. */
    public final Task stopTask

    public Fixture() {
        stopTask = createStopTask()
        finalizedBy(stopTask)
    }

    @Override
    protected void runAnt(AntBuilder ant) {
        project.delete(baseDir) // reset everything
        cwd.mkdirs()
        final String realExecutable
        final List<Object> realArgs = new ArrayList<>()
        final Map<String, Object> realEnv = environment
        // We need to choose which executable we are using. In shell mode, or when we
        // are spawning and thus using the wrapper script, the executable is the shell.
        if (useShell || spawn) {
            if (Os.isFamily(Os.FAMILY_WINDOWS)) {
                realExecutable = 'cmd'
                realArgs.add('/C')
                realArgs.add('"') // quote the entire command
            } else {
                realExecutable = 'sh'
            }
        } else {
            realExecutable = executable
            realArgs.addAll(arguments)
        }
        if (spawn) {
            writeWrapperScript(executable)
            realArgs.add(wrapperScript)
            realArgs.addAll(arguments)
        }
        if (Os.isFamily(Os.FAMILY_WINDOWS) && (useShell || spawn)) {
            realArgs.add('"')
        }
        commandString.eachLine { line -> logger.info(line) }

        ant.exec(executable: realExecutable, spawn: spawn, dir: cwd, taskname: name) {
            realEnv.each { key, value -> env(key: key, value: value) }
            realArgs.each { arg(value: it) }
        }

        String failedProp = "failed${name}"
        // first wait for resources, or the failure marker from the wrapper script
        ant.waitfor(maxwait: '30', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond', timeoutproperty: failedProp) {
            or {
                resourceexists {
                    file(file: failureMarker.toString())
                }
                and {
                    resourceexists {
                        file(file: pidFile.toString())
                    }
                    resourceexists {
                        file(file: portsFile.toString())
                    }
                }
            }
        }

        if (ant.project.getProperty(failedProp) || failureMarker.exists()) {
            fail("Failed to start ${name}")
        }

        // the process is started (has a pid) and is bound to a network interface
        // so now wait until the waitCondition has been met
        // TODO: change this to a loop?
        boolean success
        try {
            success = waitCondition(this, ant) == false ? false : true
        } catch (Exception e) {
            String msg = "Wait condition caught exception for ${name}"
            logger.error(msg, e)
            fail(msg, e)
        }
        if (success == false) {
            fail("Wait condition failed for ${name}")
        }
    }

    /** Returns a debug string used to log information about how the fixture was run. */
    protected String getCommandString() {
        String commandString = "\n${name} configuration:\n"
        commandString += "-----------------------------------------\n"
        commandString += "  cwd: ${cwd}\n"
        commandString += "  command: ${executable} ${arguments.join(' ')}\n"
        commandString += '  environment:\n'
        environment.each { k, v -> commandString += "    ${k}: ${v}\n" }
        if (spawn) {
            commandString += "\n  [${wrapperScript.name}]\n"
            wrapperScript.eachLine('UTF-8', { line -> commandString += "    ${line}\n"})
        }
        return commandString
    }

    /**
     * Writes a script to run the real executable, so that stdout/stderr can be captured.
     * TODO: this could be removed if we do use our own ProcessBuilder and pump output from the process
     */
    private void writeWrapperScript(String executable) {
        wrapperScript.parentFile.mkdirs()
        String argsPasser = '"$@"'
        String exitMarker = "; if [ \$? != 0 ]; then touch run.failed; fi"
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            argsPasser = '%*'
            exitMarker = "\r\n if \"%errorlevel%\" neq \"0\" ( type nul >> run.failed )"
        }
        wrapperScript.setText("\"${executable}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
    }

    /** Fail the build with the given message, logging relevant info. */
    private void fail(String msg, Exception... suppressed) {
        if (logger.isInfoEnabled() == false) {
            // We already log the command at info level. No need to do it twice.
            commandString.eachLine { line -> logger.error(line) }
        }
        logger.error("${name} output:")
        logger.error("-----------------------------------------")
        logger.error("  failure marker exists: ${failureMarker.exists()}")
        logger.error("  pid file exists: ${pidFile.exists()}")
        logger.error("  ports file exists: ${portsFile.exists()}")
        // also dump the log file for the startup script (which will include ES logging output to stdout)
        if (runLog.exists()) {
            logger.error("\n  [log]")
            runLog.eachLine { line -> logger.error("    ${line}") }
        }
        logger.error("-----------------------------------------")
        GradleException toThrow = new GradleException(msg)
        for (Exception e : suppressed) {
            toThrow.addSuppressed(e)
        }
        throw toThrow
    }

    /** Adds a task to kill an elasticsearch node with the given pidfile */
    private Task createStopTask() {
        final Fixture fixture = this
        final Object pid = "${ -> fixture.pid }"
        Exec stop = project.tasks.create(name: "${name}#stop", type: LoggedExec)
        stop.onlyIf { fixture.pidFile.exists() }
        stop.doFirst {
            logger.info("Shutting down ${fixture.name} with pid ${pid}")
        }
        if (Os.isFamily(Os.FAMILY_WINDOWS)) {
            stop.executable = 'Taskkill'
            stop.args('/PID', pid, '/F')
        } else {
            stop.executable = 'kill'
            stop.args('-9', pid)
        }
        stop.doLast {
            project.delete(fixture.pidFile)
        }
        return stop
    }

    /**
     * A path relative to the build dir that all configuration and runtime files
     * will live in for this fixture
     */
    protected File getBaseDir() {
        return new File(project.buildDir, "fixtures/${name}")
    }

    /** Returns the working directory for the process. Defaults to "cwd" inside baseDir. */
    protected File getCwd() {
        return new File(baseDir, 'cwd')
    }

    /** Returns the file the process writes its pid to. Defaults to "pid" inside baseDir. */
    protected File getPidFile() {
        return new File(baseDir, 'pid')
    }

    /** Reads the pid file and returns the process' pid */
    public int getPid() {
        return Integer.parseInt(pidFile.getText('UTF-8').trim())
    }

    /** Returns the file the process writes its bound ports to. Defaults to "ports" inside baseDir. */
    protected File getPortsFile() {
        return new File(baseDir, 'ports')
    }

    /** Returns an address and port suitable for a uri to connect to this node over http */
    public String getAddressAndPort() {
        return portsFile.readLines("UTF-8").get(0)
    }

    /** Returns a file that wraps around the actual command when {@code spawn == true}. */
    protected File getWrapperScript() {
        return new File(cwd, Os.isFamily(Os.FAMILY_WINDOWS) ? 'run.bat' : 'run')
    }

    /** Returns a file that the wrapper script writes when the command failed. */
    protected File getFailureMarker() {
        return new File(cwd, 'run.failed')
    }

    /** Returns a file that the wrapper script writes its output to. */
    protected File getRunLog() {
        return new File(cwd, 'run.log')
    }
}
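As a sketch of intended usage (the task and script names here are invented for illustration, not part of the commit), a build script could wire a fixture into the REST integration tests roughly like:

---------------------------------------------------------------------------
task exampleFixture(type: org.elasticsearch.gradle.test.Fixture) {
    executable = "${projectDir}/src/test/fixtures/run-fixture.sh"  // hypothetical script
    args 'some-arg'
    env 'FIXTURE_HOME', "${buildDir}/fixtures"
}

// RestIntegTestTask.dependsOn() (see the hunk below) adds finalizedBy(stopTask)
// for any Fixture dependency, so the fixture is torn down after the tests.
integTest.dependsOn exampleFixture
---------------------------------------------------------------------------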
@@ -20,7 +20,7 @@ package org.elasticsearch.gradle.test

import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.Project
import org.gradle.api.GradleException
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option
import org.gradle.api.plugins.JavaBasePlugin

@@ -82,4 +82,25 @@ public class RestIntegTestTask extends RandomizedTestingTask {
    public ClusterConfiguration getCluster() {
        return clusterConfig
    }

    @Override
    public Task dependsOn(Object... dependencies) {
        super.dependsOn(dependencies)
        for (Object dependency : dependencies) {
            if (dependency instanceof Fixture) {
                finalizedBy(((Fixture)dependency).stopTask)
            }
        }
        return this
    }

    @Override
    public void setDependsOn(Iterable<?> dependencies) {
        super.setDependsOn(dependencies)
        for (Object dependency : dependencies) {
            if (dependency instanceof Fixture) {
                finalizedBy(((Fixture)dependency).stopTask)
            }
        }
    }
}
@@ -1,7 +1,6 @@
package org.elasticsearch.gradle.test

import org.gradle.api.DefaultTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.internal.tasks.options.Option
import org.gradle.util.ConfigureUtil
@@ -27,7 +27,6 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin
import org.gradle.plugins.ide.eclipse.model.EclipseClasspath

/** Configures the build to have a rest integration test. */
public class StandaloneTestBasePlugin implements Plugin<Project> {

@@ -42,7 +41,7 @@ public class StandaloneTestBasePlugin implements Plugin<Project> {

    // only setup tests to build
    project.sourceSets.create('test')
    project.dependencies.add('testCompile', "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}")
    project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}")

    project.eclipse.classpath.sourceSets = [project.sourceSets.test]
    project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]
@@ -19,8 +19,7 @@
package org.elasticsearch.gradle.vagrant

import org.gradle.api.DefaultTask
import org.gradle.api.tasks.*
import org.gradle.logging.ProgressLogger
import org.gradle.api.tasks.TaskAction
import org.gradle.logging.ProgressLoggerFactory
import org.gradle.process.internal.ExecAction
import org.gradle.process.internal.ExecActionFactory
@@ -19,8 +19,7 @@
package org.elasticsearch.gradle.vagrant

import org.gradle.api.DefaultTask
import org.gradle.api.tasks.*
import org.gradle.logging.ProgressLogger
import org.gradle.api.tasks.TaskAction
import org.gradle.logging.ProgressLoggerFactory
import org.gradle.process.internal.ExecAction
import org.gradle.process.internal.ExecActionFactory
@ -0,0 +1,98 @@
|
|||
# Licensed to Elasticsearch under one or more contributor
|
||||
# license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright
|
||||
# ownership. Elasticsearch licenses this file to you under
|
||||
# the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on
|
||||
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
|
||||
# either express or implied. See the License for the specific
|
||||
# language governing permissions and limitations under the License.
|
||||
|
||||
# Checks that we run against bytecode of third-party dependencies
|
||||
#
|
||||
# Be judicious about what is denied here: MANY classes will be subject
|
||||
# to these rules, so please try to keep the false positive rate low!
|
||||
#
|
||||
# Each third party .class failing checks will need to be explicitly
|
||||
# listed in the module's build.gradle file:
|
||||
#
|
||||
# thirdPartyAudit.excludes = [
|
||||
# // uses internal java api: sun.misc.Unsafe
|
||||
# 'org.foo.Bar',
|
||||
# // missing class!
|
||||
# 'com.missing.dependency.WTF',
|
||||
# // ...
|
||||
# ]
|
||||
#
|
||||
# Wildcards are not allowed, excludes must be exact. The build also fails with
|
||||
# the message "Invalid exclusions, nothing is wrong with these classes" if
|
||||
# extraneous classes are in the excludes list, this ensures the list is
|
||||
# up-to-date, and that each module accurately documents the evil things
|
||||
# that its dependencies do.
|
||||
#
|
||||
# For more information, look at ThirdPartyAuditTask.groovy in buildSrc/
|
||||
|
||||
#
|
||||
# Ruleset to fail on java internal apis, using this logic:
|
||||
# http://docs.oracle.com/javase/8/docs/api/java/lang/SecurityManager.html#checkPackageAccess-java.lang.String-
|
||||
#
|
||||
# // The list may change at any time, regenerated with:
|
||||
# for (String pkg : new TreeSet<>(Arrays.asList(
|
||||
# Security.getProperty("package.access").split(",")))) {
|
||||
# System.out.println(pkg + "**");
|
||||
# }
|
||||
#
|
||||
@defaultMessage non-public internal runtime class
com.oracle.webservices.internal.**
com.oracle.xmlns.internal.**
com.sun.activation.registries.**
com.sun.browser.**
com.sun.corba.se.**
com.sun.glass.**
com.sun.imageio.**
com.sun.istack.internal.**
com.sun.javafx.**
com.sun.jmx.**
com.sun.media.**
com.sun.media.sound.**
com.sun.naming.internal.**
com.sun.openpisces.**
com.sun.org.apache.bcel.internal.**
com.sun.org.apache.regexp.internal.**
com.sun.org.apache.xalan.internal.extensions.**
com.sun.org.apache.xalan.internal.lib.**
com.sun.org.apache.xalan.internal.res.**
com.sun.org.apache.xalan.internal.templates.**
com.sun.org.apache.xalan.internal.utils.**
com.sun.org.apache.xalan.internal.xslt.**
com.sun.org.apache.xalan.internal.xsltc.cmdline.**
com.sun.org.apache.xalan.internal.xsltc.compiler.**
com.sun.org.apache.xalan.internal.xsltc.trax.**
com.sun.org.apache.xalan.internal.xsltc.util.**
com.sun.org.apache.xerces.internal.**
com.sun.org.apache.xml.internal.res.**
com.sun.org.apache.xml.internal.security.**
com.sun.org.apache.xml.internal.serializer.utils.**
com.sun.org.apache.xml.internal.utils.**
com.sun.org.apache.xpath.internal.**
com.sun.org.glassfish.**
com.sun.pisces.**
com.sun.prism.**
com.sun.proxy.**
com.sun.scenario.**
com.sun.t2k.**
com.sun.webkit.**
com.sun.xml.internal.**
jdk.internal.**
jdk.management.resource.internal.**
jdk.nashorn.internal.**
jdk.nashorn.tools.**
oracle.jrockit.jfr.**
org.jcp.xml.dsig.internal.**
sun.**
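
The comment at the top of this new signatures file sketches how the denied-package list is regenerated. For reference, here is that regeneration loop as a self-contained program; the logic is taken verbatim from the file's own comment, only the wrapper class name is ours:

---------------------------------------------------------------------------
import java.security.Security;
import java.util.Arrays;
import java.util.TreeSet;

// Prints the restricted-package globs above, one per line, sorted.
public class PrintRestrictedPackages {
    public static void main(String[] args) {
        for (String pkg : new TreeSet<>(Arrays.asList(
                Security.getProperty("package.access").split(",")))) {
            System.out.println(pkg + "**");
        }
    }
}
---------------------------------------------------------------------------
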
@@ -1,5 +1,5 @@
elasticsearch = 3.0.0-SNAPSHOT
lucene = 5.5.0-snapshot-1719088
lucene = 5.5.0-snapshot-1721183

# optional dependencies
spatial4j = 0.5

@@ -17,9 +17,9 @@
 * under the License.
 */

import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestSpecHack

apply plugin: 'elasticsearch.build'
apply plugin: 'com.bmuschko.nexus'
@@ -82,7 +82,7 @@ dependencies {
  compile "net.java.dev.jna:jna:${versions.jna}", optional

  if (isEclipse == false || project.path == ":core-tests") {
    testCompile("org.elasticsearch:test-framework:${version}") {
    testCompile("org.elasticsearch.test:framework:${version}") {
      // tests use the locally compiled version of core
      exclude group: 'org.elasticsearch', module: 'elasticsearch'
    }
@@ -111,6 +111,123 @@ forbiddenPatterns {
  exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
}

thirdPartyAudit.excludes = [
  // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
  'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',

  // classes are missing!

  // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
  'com.fasterxml.jackson.databind.ObjectMapper',

  // from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder (netty)
  'com.google.protobuf.CodedInputStream',

  // from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender (netty)
  'com.google.protobuf.CodedOutputStream',

  // from org.jboss.netty.handler.codec.protobuf.ProtobufDecoder (netty)
  'com.google.protobuf.ExtensionRegistry',
  'com.google.protobuf.MessageLite$Builder',
  'com.google.protobuf.MessageLite',
  'com.google.protobuf.Parser',

  // from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
  'javax.jms.Message',
  'javax.jms.MessageListener',
  'javax.jms.ObjectMessage',
  'javax.jms.TopicConnection',
  'javax.jms.TopicConnectionFactory',
  'javax.jms.TopicPublisher',
  'javax.jms.TopicSession',
  'javax.jms.TopicSubscriber',

  // from org.apache.log4j.net.SMTPAppender (log4j)
  'javax.mail.Authenticator',
  'javax.mail.Message$RecipientType',
  'javax.mail.Message',
  'javax.mail.Multipart',
  'javax.mail.PasswordAuthentication',
  'javax.mail.Session',
  'javax.mail.Transport',
  'javax.mail.internet.InternetAddress',
  'javax.mail.internet.InternetHeaders',
  'javax.mail.internet.MimeBodyPart',
  'javax.mail.internet.MimeMessage',
  'javax.mail.internet.MimeMultipart',
  'javax.mail.internet.MimeUtility',

  // from org.jboss.netty.channel.socket.http.HttpTunnelingServlet (netty)
  'javax.servlet.ServletConfig',
  'javax.servlet.ServletException',
  'javax.servlet.ServletOutputStream',
  'javax.servlet.http.HttpServlet',
  'javax.servlet.http.HttpServletRequest',
  'javax.servlet.http.HttpServletResponse',

  // from org.jboss.netty.logging.CommonsLoggerFactory (netty)
  'org.apache.commons.logging.Log',
  'org.apache.commons.logging.LogFactory',

  // from org.apache.lucene.sandbox.queries.regex.JakartaRegexpCapabilities$JakartaRegexMatcher (lucene-sandbox)
  'org.apache.regexp.CharacterIterator',
  'org.apache.regexp.RE',
  'org.apache.regexp.REProgram',

  // from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
  'org.apache.tomcat.jni.Buffer',
  'org.apache.tomcat.jni.Library',
  'org.apache.tomcat.jni.Pool',
  'org.apache.tomcat.jni.SSL',
  'org.apache.tomcat.jni.SSLContext',

  // from org.jboss.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty)
  'org.bouncycastle.asn1.x500.X500Name',
  'org.bouncycastle.cert.X509v3CertificateBuilder',
  'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
  'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder',
  'org.bouncycastle.jce.provider.BouncyCastleProvider',
  'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder',

  // from org.jboss.netty.handler.ssl.JettyNpnSslEngine (netty)
  'org.eclipse.jetty.npn.NextProtoNego$ClientProvider',
  'org.eclipse.jetty.npn.NextProtoNego$ServerProvider',
  'org.eclipse.jetty.npn.NextProtoNego',

  // from org.jboss.netty.logging.JBossLoggerFactory (netty)
  'org.jboss.logging.Logger',

  // from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteInput (netty)
  'org.jboss.marshalling.ByteInput',

  // from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty)
  'org.jboss.marshalling.ByteOutput',

  // from org.jboss.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty)
  'org.jboss.marshalling.Marshaller',

  // from org.jboss.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty)
  'org.jboss.marshalling.MarshallerFactory',
  'org.jboss.marshalling.MarshallingConfiguration',
  'org.jboss.marshalling.Unmarshaller',

  // from com.spatial4j.core.io.GeoJSONReader (spatial4j)
  'org.noggit.JSONParser',

  // from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
  'org.osgi.framework.BundleActivator',
  'org.osgi.framework.BundleContext',

  // from org.jboss.netty.logging.OsgiLoggerFactory$1 (netty)
  'org.osgi.framework.ServiceReference',
  'org.osgi.service.log.LogService',
  'org.osgi.util.tracker.ServiceTracker',
  'org.osgi.util.tracker.ServiceTrackerCustomizer',

  'org.slf4j.impl.StaticMDCBinder',
  'org.slf4j.impl.StaticMarkerBinder',
]

// dependency licenses are currently checked in distribution
dependencyLicenses.enabled = false

@@ -18,9 +18,19 @@
 */
package org.apache.lucene.queries;

import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.ToStringUtils;
@@ -44,7 +54,7 @@ import java.util.Objects;
 * While aggregating the total term frequency is trivial since it
 * can be summed up, not every {@link org.apache.lucene.search.similarities.Similarity}
 * makes use of this statistic. The document frequency which is used in the
 * {@link org.apache.lucene.search.similarities.DefaultSimilarity}
 * {@link org.apache.lucene.search.similarities.ClassicSimilarity}
 * can only be estimated as a lower bound since it is a document-based statistic. For
 * the document frequency the maximum frequency across all fields per term is used,
 * which is the minimum number of documents the term occurs in.

@@ -23,7 +23,14 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.automaton.RegExp;
import org.elasticsearch.common.lucene.search.Queries;
@@ -35,7 +42,12 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.QueryParsers;

import java.io.IOException;
import java.util.*;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
@@ -214,7 +226,7 @@ public class MapperQueryParser extends QueryParser {
            }
        }
        if (query == null) {
            query = super.getFieldQuery(currentFieldType.names().indexName(), queryText, quoted);
            query = super.getFieldQuery(currentFieldType.name(), queryText, quoted);
        }
        return query;
    }

@@ -454,7 +466,7 @@ public class MapperQueryParser extends QueryParser {
            query = currentFieldType.prefixQuery(termStr, multiTermRewriteMethod, context);
        }
        if (query == null) {
            query = getPossiblyAnalyzedPrefixQuery(currentFieldType.names().indexName(), termStr);
            query = getPossiblyAnalyzedPrefixQuery(currentFieldType.name(), termStr);
        }
        return query;
    }

@@ -580,7 +592,7 @@ public class MapperQueryParser extends QueryParser {
            if (!settings.forceAnalyzer()) {
                setAnalyzer(context.getSearchAnalyzer(currentFieldType));
            }
            indexedNameField = currentFieldType.names().indexName();
            indexedNameField = currentFieldType.name();
            return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr);
        }
        return getPossiblyAnalyzedWildcardQuery(indexedNameField, termStr);

@@ -22,7 +22,11 @@ package org.apache.lucene.search.vectorhighlight;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.search.*;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;

@@ -30,7 +30,13 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

/**

@@ -268,13 +268,19 @@ public class Version {
    public static final int V_2_0_1_ID = 2000199;
    public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final int V_2_0_2_ID = 2000299;
    public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final Version V_2_0_2 = new Version(V_2_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final int V_2_0_3_ID = 2000399;
    public static final Version V_2_0_3 = new Version(V_2_0_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
    public static final int V_2_1_0_ID = 2010099;
    public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
    public static final int V_2_1_1_ID = 2010199;
    public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
    public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
    public static final int V_2_1_2_ID = 2010299;
    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
    public static final int V_2_2_0_ID = 2020099;
    public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
    public static final int V_2_3_0_ID = 2030099;
    public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
    public static final int V_3_0_0_ID = 3000099;
    public static final Version V_3_0_0 = new Version(V_3_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final Version CURRENT = V_3_0_0;
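
The numeric `_ID` constants in this hunk appear to follow Elasticsearch's packed version-id layout: two decimal digits each for major, minor, and revision, plus a two-digit build field where 99 marks a GA release. The boolean flipped from `true` to `false` for 2.0.2 and 2.1.1 is the separate snapshot flag, not part of the id. A minimal decoding sketch, assuming that layout holds (the class name is ours):

---------------------------------------------------------------------------
public class VersionIdDemo {
    public static void main(String[] args) {
        int id = 2010299;                // V_2_1_2_ID from the hunk above
        int major = id / 1000000;        // 2
        int minor = (id / 10000) % 100;  // 1
        int revision = (id / 100) % 100; // 2
        int build = id % 100;            // 99 encodes a GA release
        System.out.println(major + "." + minor + "." + revision + " (build " + build + ")");
    }
}
---------------------------------------------------------------------------
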
@@ -291,12 +297,18 @@ public class Version {
        switch (id) {
            case V_3_0_0_ID:
                return V_3_0_0;
            case V_2_3_0_ID:
                return V_2_3_0;
            case V_2_2_0_ID:
                return V_2_2_0;
            case V_2_1_2_ID:
                return V_2_1_2;
            case V_2_1_1_ID:
                return V_2_1_1;
            case V_2_1_0_ID:
                return V_2_1_0;
            case V_2_0_3_ID:
                return V_2_0_3;
            case V_2_0_2_ID:
                return V_2_0_2;
            case V_2_0_1_ID:

@@ -19,8 +19,6 @@

package org.elasticsearch.action;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;

import java.util.concurrent.Future;

@@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction;

@@ -58,6 +60,8 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction;
import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction;
import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction;
import org.elasticsearch.action.admin.cluster.validate.template.TransportRenderSearchTemplateAction;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction;
import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistAction;

@@ -79,7 +83,9 @@ import org.elasticsearch.action.admin.indices.exists.indices.TransportIndicesExi
import org.elasticsearch.action.admin.indices.exists.types.TransportTypesExistsAction;
import org.elasticsearch.action.admin.indices.exists.types.TypesExistsAction;
import org.elasticsearch.action.admin.indices.flush.FlushAction;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
import org.elasticsearch.action.admin.indices.flush.TransportFlushAction;
import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
import org.elasticsearch.action.admin.indices.forcemerge.TransportForceMergeAction;
import org.elasticsearch.action.admin.indices.get.GetIndexAction;

@@ -121,8 +127,6 @@ import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeAction;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsAction;
import org.elasticsearch.action.admin.indices.validate.query.TransportValidateQueryAction;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction;
import org.elasticsearch.action.admin.cluster.validate.template.TransportRenderSearchTemplateAction;
import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerAction;
import org.elasticsearch.action.admin.indices.warmer.delete.TransportDeleteWarmerAction;
import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersAction;

@@ -253,6 +257,7 @@ public class ActionModule extends AbstractModule {
        registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
        registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
        registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
        registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);

        registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
        registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);

@@ -293,6 +298,7 @@ public class ActionModule extends AbstractModule {
        registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
        registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
        registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
        registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
        registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
        registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
        registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);

@@ -21,7 +21,6 @@ package org.elasticsearch.action;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.shard.ShardId;

@@ -62,4 +62,12 @@ public interface DocumentRequest<T> extends IndicesRequest {
     * @return the Routing
     */
    String routing();

    /**
     * Get the parent for this request
     * @return the Parent
     */
    String parent();

}

@@ -19,7 +19,6 @@

package org.elasticsearch.action;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;

@@ -0,0 +1,117 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

import static org.elasticsearch.ExceptionsHelper.detailedMessage;

/**
 * Information about task operation failures
 *
 * The class is final due to serialization limitations
 */
public final class TaskOperationFailure implements Writeable<TaskOperationFailure>, ToXContent {

    private final String nodeId;

    private final long taskId;

    private final Throwable reason;

    private final RestStatus status;

    public TaskOperationFailure(StreamInput in) throws IOException {
        nodeId = in.readString();
        taskId = in.readLong();
        reason = in.readThrowable();
        status = RestStatus.readFrom(in);
    }

    public TaskOperationFailure(String nodeId, long taskId, Throwable t) {
        this.nodeId = nodeId;
        this.taskId = taskId;
        this.reason = t;
        status = ExceptionsHelper.status(t);
    }

    public String getNodeId() {
        return this.nodeId;
    }

    public long getTaskId() {
        return this.taskId;
    }

    public String getReason() {
        return detailedMessage(reason);
    }

    public RestStatus getStatus() {
        return status;
    }

    public Throwable getCause() {
        return reason;
    }

    @Override
    public TaskOperationFailure readFrom(StreamInput in) throws IOException {
        return new TaskOperationFailure(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(nodeId);
        out.writeLong(taskId);
        out.writeThrowable(reason);
        RestStatus.writeTo(out, status);
    }

    @Override
    public String toString() {
        return "[" + nodeId + "][" + taskId + "] failed, reason [" + getReason() + "]";
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field("task_id", getTaskId());
        builder.field("node_id", getNodeId());
        builder.field("status", status.name());
        if (reason != null) {
            builder.field("reason");
            builder.startObject();
            ElasticsearchException.toXContent(builder, params, reason);
            builder.endObject();
        }
        return builder;
    }
}
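
The new class follows the Writeable pattern: the StreamInput constructor must read fields in exactly the order writeTo writes them. A hedged round-trip sketch; the stream helper calls (BytesStreamOutput, StreamInput.wrap) are assumptions about the 2.x-era API, not taken from this commit:

---------------------------------------------------------------------------
// Serialize and deserialize one failure; field order must match writeTo.
BytesStreamOutput out = new BytesStreamOutput();
new TaskOperationFailure("node-1", 42, new RuntimeException("boom")).writeTo(out);
StreamInput in = StreamInput.wrap(out.bytes().toBytes());
TaskOperationFailure copy = new TaskOperationFailure(in);
---------------------------------------------------------------------------
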
@@ -23,7 +23,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.*;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;

/**
 * A generic proxy that will execute the given action against a specific node.

@@ -21,7 +21,6 @@ package org.elasticsearch.action;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.HppcMaps;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

@@ -22,9 +22,9 @@ package org.elasticsearch.action.admin.cluster.health;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;

@@ -23,7 +23,11 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

@@ -34,6 +38,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -71,7 +76,13 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
    }

    @Override
    protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
    protected final void masterOperation(ClusterHealthRequest request, ClusterState state, ActionListener<ClusterHealthResponse> listener) throws Exception {
        logger.warn("attempt to execute a cluster health operation without a task");
        throw new UnsupportedOperationException("task parameter is required for this operation");
    }

    @Override
    protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
        if (request.waitForEvents() != null) {
            final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis();
            clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) {

@@ -91,7 +102,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
                @Override
                public void onNoLongerMaster(String source) {
                    logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents());
                    doExecute(request, listener);
                    doExecute(task, request, listener);
                }

                @Override

@@ -19,9 +19,7 @@

package org.elasticsearch.action.admin.cluster.node.hotthreads;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
import org.elasticsearch.client.ClusterAdminClient;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.unit.TimeValue;

@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.node.info;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.io.stream.StreamInput;

@@ -23,7 +23,9 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportService;

public final class TransportLivenessAction implements TransportRequestHandler<LivenessRequest> {

@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.node.stats;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;

@@ -17,35 +17,30 @@
 * under the License.
 */

package org.elasticsearch.rest;
package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.action.RestActionModule;

import java.util.ArrayList;
import java.util.List;
import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

/**
 *
 * Action for retrieving a list of currently running tasks
 */
public class RestModule extends AbstractModule {
public class ListTasksAction extends Action<ListTasksRequest, ListTasksResponse, ListTasksRequestBuilder> {

    private final Settings settings;
    private List<Class<? extends BaseRestHandler>> restPluginsActions = new ArrayList<>();
    public static final ListTasksAction INSTANCE = new ListTasksAction();
    public static final String NAME = "cluster:monitor/tasks/lists";

    public void addRestAction(Class<? extends BaseRestHandler> restAction) {
        restPluginsActions.add(restAction);
    private ListTasksAction() {
        super(NAME);
    }

    public RestModule(Settings settings) {
        this.settings = settings;
    }

    @Override
    protected void configure() {
        bind(RestController.class).asEagerSingleton();
        new RestActionModule(restPluginsActions).configure(binder());
    public ListTasksResponse newResponse() {
        return new ListTasksResponse();
    }

    @Override
    public ListTasksRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new ListTasksRequestBuilder(client, this);
    }
}
@@ -0,0 +1,69 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

/**
 * A request to get node tasks
 */
public class ListTasksRequest extends BaseTasksRequest<ListTasksRequest> {

    private boolean detailed = false;

    /**
     * Get information from nodes based on the nodes ids specified. If none are passed, information
     * for all nodes will be returned.
     */
    public ListTasksRequest(String... nodesIds) {
        super(nodesIds);
    }

    /**
     * Should the detailed task information be returned.
     */
    public boolean detailed() {
        return this.detailed;
    }

    /**
     * Should the detailed task information be returned.
     */
    public ListTasksRequest detailed(boolean detailed) {
        this.detailed = detailed;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        detailed = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeBoolean(detailed);
    }
}
@@ -0,0 +1,41 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.action.support.tasks.TasksRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Builder for the request to retrieve the list of tasks running on the specified nodes
 */
public class ListTasksRequestBuilder extends TasksRequestBuilder<ListTasksRequest, ListTasksResponse, ListTasksRequestBuilder> {

    public ListTasksRequestBuilder(ElasticsearchClient client, ListTasksAction action) {
        super(client, action, new ListTasksRequest());
    }

    /**
     * Should detailed task information be returned.
     */
    public ListTasksRequestBuilder setDetailed(boolean detailed) {
        request.detailed(detailed);
        return this;
    }
}
@@ -0,0 +1,159 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Returns the list of tasks currently running on the nodes
 */
public class ListTasksResponse extends BaseTasksResponse implements ToXContent {

    private List<TaskInfo> tasks;

    private Map<DiscoveryNode, List<TaskInfo>> nodes;

    public ListTasksResponse() {
    }

    public ListTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFailures, List<? extends FailedNodeException> nodeFailures) {
        super(taskFailures, nodeFailures);
        this.tasks = tasks == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks));
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        tasks = Collections.unmodifiableList(in.readList(TaskInfo::new));
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeList(tasks);
    }

    /**
     * Returns the list of tasks by node
     */
    public Map<DiscoveryNode, List<TaskInfo>> getPerNodeTasks() {
        if (nodes != null) {
            return nodes;
        }
        Map<DiscoveryNode, List<TaskInfo>> nodeTasks = new HashMap<>();

        Set<DiscoveryNode> nodes = new HashSet<>();
        for (TaskInfo shard : tasks) {
            nodes.add(shard.getNode());
        }

        for (DiscoveryNode node : nodes) {
            List<TaskInfo> tasks = new ArrayList<>();
            for (TaskInfo taskInfo : this.tasks) {
                if (taskInfo.getNode().equals(node)) {
                    tasks.add(taskInfo);
                }
            }
            nodeTasks.put(node, tasks);
        }
        this.nodes = nodeTasks;
        return nodeTasks;
    }

    public List<TaskInfo> getTasks() {
        return tasks;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        if (getTaskFailures() != null && getTaskFailures().size() > 0) {
            builder.startArray("task_failures");
            for (TaskOperationFailure ex : getTaskFailures()){
                builder.value(ex);
            }
            builder.endArray();
        }

        if (getNodeFailures() != null && getNodeFailures().size() > 0) {
            builder.startArray("node_failures");
            for (FailedNodeException ex : getNodeFailures()){
                builder.value(ex);
            }
            builder.endArray();
        }

        builder.startObject("nodes");
        for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
            DiscoveryNode node = entry.getKey();
            builder.startObject(node.getId(), XContentBuilder.FieldCaseConversion.NONE);
            builder.field("name", node.name());
            builder.field("transport_address", node.address().toString());
            builder.field("host", node.getHostName());
            builder.field("ip", node.getAddress());

            if (!node.attributes().isEmpty()) {
                builder.startObject("attributes");
                for (ObjectObjectCursor<String, String> attr : node.attributes()) {
                    builder.field(attr.key, attr.value, XContentBuilder.FieldCaseConversion.NONE);
                }
                builder.endObject();
            }
            builder.startArray("tasks");
            for(TaskInfo task : entry.getValue()) {
                task.toXContent(builder, params);
            }
            builder.endArray();
            builder.endObject();
        }
        builder.endObject();
        return builder;
    }

    @Override
    public String toString() {
        try {
            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
            builder.startObject();
            toXContent(builder, EMPTY_PARAMS);
            builder.endObject();
            return builder.string();
        } catch (IOException e) {
            return "{ \"error\" : \"" + e.getMessage() + "\"}";
        }
    }
}
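
Reading toXContent above, a response rendered through toString should look roughly like the following. The node id, address, and task values here are invented for illustration; only the field names come from the code:

---------------------------------------------------------------------------
{
  "nodes" : {
    "B6fP3M9fQqWFZ_QYyWVdWg" : {
      "name" : "node-1",
      "transport_address" : "127.0.0.1:9300",
      "host" : "127.0.0.1",
      "ip" : "127.0.0.1:9300",
      "tasks" : [ {
        "node" : "B6fP3M9fQqWFZ_QYyWVdWg",
        "id" : 12,
        "type" : "transport",
        "action" : "cluster:monitor/tasks/lists"
      } ]
    }
  }
}
---------------------------------------------------------------------------
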
@@ -0,0 +1,140 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

/**
 * Information about a currently running task.
 * <p>
 * Tasks are used for communication with transport actions. As a result, they can contain callback
 * references as well as mutable state. That makes it impractical to send tasks over transport channels
 * and use in APIs. Instead, immutable and streamable TaskInfo objects are used to represent
 * snapshot information about currently running tasks.
 */
public class TaskInfo implements Writeable<TaskInfo>, ToXContent {

    private final DiscoveryNode node;

    private final long id;

    private final String type;

    private final String action;

    private final String description;

    private final String parentNode;

    private final long parentId;

    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description) {
        this(node, id, type, action, description, null, -1L);
    }

    public TaskInfo(DiscoveryNode node, long id, String type, String action, String description, String parentNode, long parentId) {
        this.node = node;
        this.id = id;
        this.type = type;
        this.action = action;
        this.description = description;
        this.parentNode = parentNode;
        this.parentId = parentId;
    }

    public TaskInfo(StreamInput in) throws IOException {
        node = DiscoveryNode.readNode(in);
        id = in.readLong();
        type = in.readString();
        action = in.readString();
        description = in.readOptionalString();
        parentNode = in.readOptionalString();
        parentId = in.readLong();
    }

    public DiscoveryNode getNode() {
        return node;
    }

    public long getId() {
        return id;
    }

    public String getType() {
        return type;
    }

    public String getAction() {
        return action;
    }

    public String getDescription() {
        return description;
    }

    public String getParentNode() {
        return parentNode;
    }

    public long getParentId() {
        return parentId;
    }

    @Override
    public TaskInfo readFrom(StreamInput in) throws IOException {
        return new TaskInfo(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        node.writeTo(out);
        out.writeLong(id);
        out.writeString(type);
        out.writeString(action);
        out.writeOptionalString(description);
        out.writeOptionalString(parentNode);
        out.writeLong(parentId);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field("node", node.getId());
        builder.field("id", id);
        builder.field("type", type);
        builder.field("action", action);
        if (description != null) {
            builder.field("description", description);
        }
        if (parentNode != null) {
            builder.field("parent_node", parentNode);
            builder.field("parent_id", parentId);
        }
        builder.endObject();
        return builder;
    }
}
@@ -0,0 +1,69 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.list;

import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.List;

/**
 * Transport action for retrieving the list of tasks currently running on the nodes.
 */
public class TransportListTasksAction extends TransportTasksAction<ListTasksRequest, ListTasksResponse, TaskInfo> {

    @Inject
    public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ListTasksAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, ListTasksRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT);
    }

    @Override
    protected ListTasksResponse newResponse(ListTasksRequest request, List<TaskInfo> tasks, List<TaskOperationFailure> taskOperationFailures, List<FailedNodeException> failedNodeExceptions) {
        return new ListTasksResponse(tasks, taskOperationFailures, failedNodeExceptions);
    }

    @Override
    protected TaskInfo readTaskResponse(StreamInput in) throws IOException {
        return new TaskInfo(in);
    }

    @Override
    protected TaskInfo taskOperation(ListTasksRequest request, Task task) {
        return task.taskInfo(clusterService.localNode(), request.detailed());
    }

    @Override
    protected boolean accumulateExceptions() {
        return true;
    }
}
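
Taken together, the files above wire up a complete tasks API: the action is registered in ActionModule, the transport action fans out to each node's TaskManager, and the response groups TaskInfo by node. A hypothetical client-side call, following the builder wiring in ListTasksAction.newRequestBuilder; the `client` instance and surrounding setup are assumptions:

---------------------------------------------------------------------------
// Hedged usage sketch; assumes an ElasticsearchClient `client` is available.
ListTasksResponse response = new ListTasksRequestBuilder(client, ListTasksAction.INSTANCE)
        .setDetailed(true) // include per-task descriptions
        .get();            // blocks until the fan-out to all nodes completes
for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : response.getPerNodeTasks().entrySet()) {
    System.out.println(entry.getKey().getId() + " -> " + entry.getValue().size() + " running tasks");
}
---------------------------------------------------------------------------
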
@@ -24,11 +24,12 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.repositories.VerificationFailure;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.common.xcontent.XContentHelper;

import java.io.IOException;
import java.util.Arrays;

/**
 * Unregister repository response

@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.reroute;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;

@ -0,0 +1,127 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.settings;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import static org.elasticsearch.cluster.ClusterState.builder;

/**
 * Updates transient and persistent cluster state settings if there are any changes
 * due to the update.
 */
final class SettingsUpdater {
    final Settings.Builder transientUpdates = Settings.settingsBuilder();
    final Settings.Builder persistentUpdates = Settings.settingsBuilder();
    private final ClusterSettings clusterSettings;

    SettingsUpdater(ClusterSettings clusterSettings) {
        this.clusterSettings = clusterSettings;
    }

    synchronized Settings getTransientUpdates() {
        return transientUpdates.build();
    }

    synchronized Settings getPersistentUpdate() {
        return persistentUpdates.build();
    }

    synchronized ClusterState updateSettings(final ClusterState currentState, Settings transientToApply, Settings persistentToApply) {
        boolean changed = false;
        Settings.Builder transientSettings = Settings.settingsBuilder();
        transientSettings.put(currentState.metaData().transientSettings());
        changed |= apply(transientToApply, transientSettings, transientUpdates, "transient");

        Settings.Builder persistentSettings = Settings.settingsBuilder();
        persistentSettings.put(currentState.metaData().persistentSettings());
        changed |= apply(persistentToApply, persistentSettings, persistentUpdates, "persistent");

        if (!changed) {
            return currentState;
        }

        MetaData.Builder metaData = MetaData.builder(currentState.metaData())
                .persistentSettings(persistentSettings.build())
                .transientSettings(transientSettings.build());

        ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
        boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings()) || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings());
        if (updatedReadOnly) {
            blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
        } else {
            blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
        }
        ClusterState build = builder(currentState).metaData(metaData).blocks(blocks).build();
        Settings settings = build.metaData().settings();
        // now we try to apply things and if they are invalid we fail
        // this dryRun will validate & parse settings but won't actually apply them.
        clusterSettings.dryRun(settings);
        return build;
    }

    private boolean apply(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
        boolean changed = false;
        final Set<String> toRemove = new HashSet<>();
        Settings.Builder settingsBuilder = Settings.settingsBuilder();
        for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
            if (entry.getValue() == null) {
                toRemove.add(entry.getKey());
            } else if (clusterSettings.isLoggerSetting(entry.getKey()) || clusterSettings.hasDynamicSetting(entry.getKey())) {
                settingsBuilder.put(entry.getKey(), entry.getValue());
                updates.put(entry.getKey(), entry.getValue());
                changed = true;
            } else {
                throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
            }
        }
        changed |= applyDeletes(toRemove, target);
        target.put(settingsBuilder.build());
        return changed;
    }

    private final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
        boolean changed = false;
        for (String entry : deletes) {
            Set<String> keysToRemove = new HashSet<>();
            Set<String> keySet = builder.internalMap().keySet();
            for (String key : keySet) {
                if (Regex.simpleMatch(entry, key)) {
                    keysToRemove.add(key);
                }
            }
            for (String key : keysToRemove) {
                builder.remove(key);
                changed = true;
            }
        }
        return changed;
    }
}
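A detail worth calling out in `SettingsUpdater#apply` above: a `null` value marks a key for removal, and `applyDeletes` expands each removed key with `Regex.simpleMatch`, so one wildcard key can clear a whole settings namespace. A minimal sketch of a caller relying on that, assuming the `Settings.settingsBuilder()` API shown above (the setting keys are illustrative only):

---------------------------------------------------------------------------
import org.elasticsearch.common.settings.Settings;

// Sketch: exercises the null-means-remove convention that
// SettingsUpdater#apply handles above. Keys are illustrative.
Settings transientToApply = Settings.settingsBuilder()
        .put("cluster.routing.allocation.enable", "none") // dynamic update
        .put("logger._root", (String) null)               // null value => queued for removal
        .build();
// applyDeletes then drops every existing key matching a removed key via
// Regex.simpleMatch, so a key of "logger.*" would clear all logger settings.
---------------------------------------------------------------------------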
@@ -28,25 +28,19 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Map;

import static org.elasticsearch.cluster.ClusterState.builder;

/**
 *
 */
@@ -54,15 +48,14 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct

    private final AllocationService allocationService;

    private final DynamicSettings dynamicSettings;
    private final ClusterSettings clusterSettings;

    @Inject
    public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
            AllocationService allocationService, @ClusterDynamicSettings DynamicSettings dynamicSettings,
            ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
            AllocationService allocationService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettings clusterSettings) {
        super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterUpdateSettingsRequest::new);
        this.allocationService = allocationService;
        this.dynamicSettings = dynamicSettings;
        this.clusterSettings = clusterSettings;
    }

    @Override
@@ -73,8 +66,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
    @Override
    protected ClusterBlockException checkBlock(ClusterUpdateSettingsRequest request, ClusterState state) {
        // allow for dedicated changes to the metadata blocks, so we don't block those to allow to "re-enable" it
        if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && request.persistentSettings().get(MetaData.SETTING_READ_ONLY) != null) ||
                request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && request.transientSettings().get(MetaData.SETTING_READ_ONLY) != null) {
        if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())) ||
                request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings())) {
            return null;
        }
        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
@@ -88,9 +81,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct

    @Override
    protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener<ClusterUpdateSettingsResponse> listener) {
        final Settings.Builder transientUpdates = Settings.settingsBuilder();
        final Settings.Builder persistentUpdates = Settings.settingsBuilder();

        final SettingsUpdater updater = new SettingsUpdater(clusterSettings);
        clusterService.submitStateUpdateTask("cluster_update_settings",
                new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.IMMEDIATE, request, listener) {

@@ -98,7 +89,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct

                    @Override
                    protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) {
                        return new ClusterUpdateSettingsResponse(acknowledged, transientUpdates.build(), persistentUpdates.build());
                        return new ClusterUpdateSettingsResponse(acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate());
                    }

                    @Override
@@ -125,7 +116,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
                        // so we should *not* execute the reroute.
                        if (!clusterService.state().nodes().localNodeMaster()) {
                            logger.debug("Skipping reroute after cluster update settings, because node is no longer master");
                            listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build()));
                            listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()));
                            return;
                        }

@@ -145,13 +136,13 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
                            @Override
                            //we return when the cluster reroute is acked or it times out but the acknowledged flag depends on whether the update settings was acknowledged
                            protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) {
                                return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, transientUpdates.build(), persistentUpdates.build());
                                return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate());
                            }

                            @Override
                            public void onNoLongerMaster(String source) {
                                logger.debug("failed to perform reroute after cluster settings were updated - current node is no longer a master");
                                listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build()));
                                listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()));
                            }

                            @Override
@@ -181,58 +172,11 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct

                    @Override
                    public ClusterState execute(final ClusterState currentState) {
                        Settings.Builder transientSettings = Settings.settingsBuilder();
                        transientSettings.put(currentState.metaData().transientSettings());
                        for (Map.Entry<String, String> entry : request.transientSettings().getAsMap().entrySet()) {
                            if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {
                                String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state());
                                if (error == null) {
                                    transientSettings.put(entry.getKey(), entry.getValue());
                                    transientUpdates.put(entry.getKey(), entry.getValue());
                                    changed = true;
                                } else {
                                    logger.warn("ignoring transient setting [{}], [{}]", entry.getKey(), error);
                                }
                            } else {
                                logger.warn("ignoring transient setting [{}], not dynamically updateable", entry.getKey());
                            }
                        }

                        Settings.Builder persistentSettings = Settings.settingsBuilder();
                        persistentSettings.put(currentState.metaData().persistentSettings());
                        for (Map.Entry<String, String> entry : request.persistentSettings().getAsMap().entrySet()) {
                            if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {
                                String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state());
                                if (error == null) {
                                    persistentSettings.put(entry.getKey(), entry.getValue());
                                    persistentUpdates.put(entry.getKey(), entry.getValue());
                                    changed = true;
                                } else {
                                    logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error);
                                }
                            } else {
                                logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey());
                            }
                        }

                        if (!changed) {
                            return currentState;
                        }

                        MetaData.Builder metaData = MetaData.builder(currentState.metaData())
                                .persistentSettings(persistentSettings.build())
                                .transientSettings(transientSettings.build());

                        ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
                        boolean updatedReadOnly = metaData.persistentSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || metaData.transientSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false);
                        if (updatedReadOnly) {
                            blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
                        } else {
                            blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
                        }

                        return builder(currentState).metaData(metaData).blocks(blocks).build();
                        ClusterState clusterState = updater.updateSettings(currentState, request.transientSettings(), request.persistentSettings());
                        changed = clusterState != currentState;
                        return clusterState;
                    }
                });
    }

}
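The net effect of the hunks above: per-entry validation moves out of the cluster-state update task into the shared `SettingsUpdater`, and an unknown or non-dynamic key now fails the request instead of being logged and skipped. A hedged sketch of what that looks like from the admin client, assuming the 2.x-era `ClusterAdminClient` API (`client` is an assumed in-scope `Client`):

---------------------------------------------------------------------------
// Sketch: with SettingsUpdater in place, a bad key surfaces as an
// IllegalArgumentException rather than a warning in the logs.
try {
    client.admin().cluster().prepareUpdateSettings()
            .setTransientSettings(Settings.settingsBuilder()
                    .put("cluster.routing.allocation.enable", "none"))
            .get();
} catch (IllegalArgumentException e) {
    // e.g. "transient setting [foo.bar], not dynamically updateable"
}
---------------------------------------------------------------------------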
@@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Map;

/**
 */

@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.snapshots.delete;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

@@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;

@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.cluster.state;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;

@@ -19,11 +19,11 @@

package org.elasticsearch.action.admin.cluster.stats;

import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;

@@ -23,7 +23,6 @@ import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectIntCursor;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.plugins.PluginInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -38,6 +37,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.plugins.PluginInfo;

import java.io.IOException;
import java.net.InetAddress;

@@ -19,9 +19,9 @@

package org.elasticsearch.action.admin.cluster.stats;

import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;

@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.cluster.stats;

import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
@@ -30,6 +29,7 @@ import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;

@@ -33,6 +33,8 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Collections;

public class TransportRenderSearchTemplateAction extends HandledTransportAction<RenderSearchTemplateRequest, RenderSearchTemplateResponse> {

    private final ScriptService scriptService;

@@ -55,7 +57,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction<

            @Override
            protected void doRun() throws Exception {
                ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request);
                ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request, Collections.emptyMap());
                BytesReference processedTemplate = (BytesReference) executable.run();
                RenderSearchTemplateResponse response = new RenderSearchTemplateResponse();
                response.source(processedTemplate);
@@ -42,7 +42,13 @@ import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.analysis.*;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.shard.ShardId;
@@ -53,7 +59,13 @@ import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.io.Reader;
import java.util.*;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

/**
 * Transport action used to execute analyze requests
@@ -114,13 +126,13 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
            if (indexService == null) {
                throw new IllegalArgumentException("No index provided, and trying to analyze based on a specific field which requires the index parameter");
            }
            MappedFieldType fieldType = indexService.mapperService().smartNameFieldType(request.field());
            MappedFieldType fieldType = indexService.mapperService().fullName(request.field());
            if (fieldType != null) {
                if (fieldType.isNumeric()) {
                    throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
                }
                analyzer = fieldType.indexAnalyzer();
                field = fieldType.names().indexName();
                field = fieldType.name();
            }
        }
        if (field == null) {
@@ -31,31 +31,37 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

/**
 * Close index action
 */
public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> implements NodeSettingsService.Listener {
public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> {

    private final MetaDataIndexStateService indexStateService;
    private final DestructiveOperations destructiveOperations;
    private volatile boolean closeIndexEnabled;
    public static final String SETTING_CLUSTER_INDICES_CLOSE_ENABLE = "cluster.indices.close.enable";
    public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER);

    @Inject
    public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                     ThreadPool threadPool, MetaDataIndexStateService indexStateService,
                                     NodeSettingsService nodeSettingsService, ActionFilters actionFilters,
                                     ClusterSettings clusterSettings, ActionFilters actionFilters,
                                     IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) {
        super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, CloseIndexRequest::new);
        this.indexStateService = indexStateService;
        this.destructiveOperations = destructiveOperations;
        this.closeIndexEnabled = settings.getAsBoolean(SETTING_CLUSTER_INDICES_CLOSE_ENABLE, true);
        nodeSettingsService.addListener(this);
        this.closeIndexEnabled = CLUSTER_INDICES_CLOSE_ENABLE_SETTING.get(settings);
        clusterSettings.addSettingsUpdateConsumer(CLUSTER_INDICES_CLOSE_ENABLE_SETTING, this::setCloseIndexEnabled);
    }

    private void setCloseIndexEnabled(boolean closeIndexEnabled) {
        this.closeIndexEnabled = closeIndexEnabled;
    }

    @Override
@@ -70,12 +76,12 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
    }

    @Override
    protected void doExecute(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) {
    protected void doExecute(Task task, CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) {
        destructiveOperations.failDestructive(request.indices());
        if (closeIndexEnabled == false) {
            throw new IllegalStateException("closing indices is disabled - set [" + SETTING_CLUSTER_INDICES_CLOSE_ENABLE + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace");
            throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace");
        }
        super.doExecute(request, listener);
        super.doExecute(task, request, listener);
    }

    @Override
@@ -104,13 +110,4 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
            }
        });
    }

    @Override
    public void onRefreshSettings(Settings settings) {
        final boolean enable = settings.getAsBoolean(SETTING_CLUSTER_INDICES_CLOSE_ENABLE, this.closeIndexEnabled);
        if (enable != this.closeIndexEnabled) {
            logger.info("updating [{}] from [{}] to [{}]", SETTING_CLUSTER_INDICES_CLOSE_ENABLE, this.closeIndexEnabled, enable);
            this.closeIndexEnabled = enable;
        }
    }
}
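The close-index hunks above are a template for the migration running through this commit: a raw settings key plus a `NodeSettingsService.Listener` becomes a typed `Setting<Boolean>` plus an update consumer, which removes the hand-rolled `onRefreshSettings` diffing. The pattern in isolation, as a hedged sketch (`MyComponent` and the key `my.feature.enable` are placeholders, not part of this diff):

---------------------------------------------------------------------------
public class MyComponent {
    // boolSetting(key, defaultValue, dynamic, scope)
    public static final Setting<Boolean> MY_FEATURE_ENABLE_SETTING =
            Setting.boolSetting("my.feature.enable", true, true, Setting.Scope.CLUSTER);

    private volatile boolean enabled;

    public MyComponent(Settings settings, ClusterSettings clusterSettings) {
        this.enabled = MY_FEATURE_ENABLE_SETTING.get(settings); // initial value
        // dynamic updates are pushed in, no manual settings diffing needed
        clusterSettings.addSettingsUpdateConsumer(MY_FEATURE_ENABLE_SETTING, v -> this.enabled = v);
    }
}
---------------------------------------------------------------------------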
@@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.indices.create;

import java.nio.charset.StandardCharsets;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequest;
@@ -43,6 +42,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

@@ -31,7 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -45,8 +45,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete

    @Inject
    public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                      ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService,
                                      NodeSettingsService nodeSettingsService, ActionFilters actionFilters,
                                      ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) {
        super(settings, DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteIndexRequest::new);
        this.deleteIndexService = deleteIndexService;
@@ -64,9 +63,9 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
    }

    @Override
    protected void doExecute(DeleteIndexRequest request, ActionListener<DeleteIndexResponse> listener) {
    protected void doExecute(Task task, DeleteIndexRequest request, ActionListener<DeleteIndexResponse> listener) {
        destructiveOperations.failDestructive(request.indices());
        super.doExecute(request, listener);
        super.doExecute(task, request, listener);
    }

    @Override
@@ -0,0 +1,44 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;


public class SyncedFlushAction extends Action<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {

    public static final SyncedFlushAction INSTANCE = new SyncedFlushAction();
    public static final String NAME = "indices:admin/synced_flush";

    private SyncedFlushAction() {
        super(NAME);
    }

    @Override
    public SyncedFlushResponse newResponse() {
        return new SyncedFlushResponse();
    }

    @Override
    public SyncedFlushRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new SyncedFlushRequestBuilder(client, this);
    }
}

@@ -0,0 +1,64 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;

import java.util.Arrays;

/**
 * A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush
 * and writes the same sync id to primary and all copies.
 *
 * <p>Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}. </p>
 *
 * @see org.elasticsearch.client.Requests#flushRequest(String...)
 * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
 * @see SyncedFlushResponse
 */
public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> {

    public SyncedFlushRequest() {
    }

    /**
     * Copy constructor that creates a new synced flush request that is a copy of the one provided as an argument.
     * The new request will inherit the headers and context from the original request that caused it.
     */
    public SyncedFlushRequest(ActionRequest originalRequest) {
        super(originalRequest);
    }

    /**
     * Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will
     * be sync flushed.
     */
    public SyncedFlushRequest(String... indices) {
        super(indices);
    }


    @Override
    public String toString() {
        return "SyncedFlushRequest{" +
                "indices=" + Arrays.toString(indices) + "}";
    }
}

@@ -0,0 +1,41 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ElasticsearchClient;

public class SyncedFlushRequestBuilder extends ActionRequestBuilder<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {

    public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) {
        super(client, action, new SyncedFlushRequest());
    }

    public SyncedFlushRequestBuilder setIndices(String[] indices) {
        super.request().indices(indices);
        return this;
    }

    public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
        super.request().indicesOptions(indicesOptions);
        return this;
    }
}
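The three new classes above give synced flush the standard action/request/builder triple, so it can be driven through the generic client execute path. A hedged usage sketch ("my-index" is a placeholder and `client` is an assumed in-scope `ElasticsearchClient`):

---------------------------------------------------------------------------
// Sketch: invoking the new action through the generic execute path.
SyncedFlushResponse response = client.execute(
        SyncedFlushAction.INSTANCE,
        new SyncedFlushRequest("my-index")).actionGet();
// the aggregated ShardCounts back the accessors on the response
int failed = response.failedShards();
---------------------------------------------------------------------------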
@@ -16,16 +16,25 @@
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.indices.flush;
package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@@ -34,13 +43,16 @@ import static java.util.Collections.unmodifiableMap;
/**
 * The result of performing a sync flush operation on all shards of multiple indices
 */
public class IndicesSyncedFlushResult implements ToXContent {
public class SyncedFlushResponse extends ActionResponse implements ToXContent {

    final Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
    final ShardCounts shardCounts;
    Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
    ShardCounts shardCounts;

    SyncedFlushResponse() {

    public IndicesSyncedFlushResult(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
    }

    public SyncedFlushResponse(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
        // shardsResultPerIndex is never modified after it is passed to this
        // constructor so this is safe even though shardsResultPerIndex is a
        // ConcurrentHashMap
@@ -48,17 +60,23 @@ public class IndicesSyncedFlushResult implements ToXContent {
        this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values()));
    }

    /** total number shards, including replicas, both assigned and unassigned */
    /**
     * total number shards, including replicas, both assigned and unassigned
     */
    public int totalShards() {
        return shardCounts.total;
    }

    /** total number of shards for which the operation failed */
    /**
     * total number of shards for which the operation failed
     */
    public int failedShards() {
        return shardCounts.failed;
    }

    /** total number of shards which were successfully sync-flushed */
    /**
     * total number of shards which were successfully sync-flushed
     */
    public int successfulShards() {
        return shardCounts.successful;
    }
@@ -91,8 +109,8 @@ public class IndicesSyncedFlushResult implements ToXContent {
                    builder.endObject();
                    continue;
                }
                Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failedShards = shardResults.failedShards();
                for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardEntry : failedShards.entrySet()) {
                Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards = shardResults.failedShards();
                for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : failedShards.entrySet()) {
                    builder.startObject();
                    builder.field(Fields.SHARD, shardResults.shardId().id());
                    builder.field(Fields.REASON, shardEntry.getValue().failureReason());
@@ -123,11 +141,11 @@ public class IndicesSyncedFlushResult implements ToXContent {
        return new ShardCounts(total, successful, failed);
    }

    static final class ShardCounts implements ToXContent {
    static final class ShardCounts implements ToXContent, Streamable {

        public final int total;
        public final int successful;
        public final int failed;
        public int total;
        public int successful;
        public int failed;

        ShardCounts(int total, int successful, int failed) {
            this.total = total;
@@ -135,6 +153,10 @@ public class IndicesSyncedFlushResult implements ToXContent {
            this.failed = failed;
        }

        ShardCounts() {

        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.field(Fields.TOTAL, total);
@@ -142,6 +164,20 @@ public class IndicesSyncedFlushResult implements ToXContent {
            builder.field(Fields.FAILED, failed);
            return builder;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            total = in.readInt();
            successful = in.readInt();
            failed = in.readInt();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeInt(total);
            out.writeInt(successful);
            out.writeInt(failed);
        }
    }

    static final class Fields {
@@ -154,4 +190,37 @@ public class IndicesSyncedFlushResult implements ToXContent {
        static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
        static final XContentBuilderString REASON = new XContentBuilderString("reason");
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        shardCounts = new ShardCounts();
        shardCounts.readFrom(in);
        Map<String, List<ShardsSyncedFlushResult>> tmpShardsResultPerIndex = new HashMap<>();
        int numShardsResults = in.readInt();
        for (int i = 0; i < numShardsResults; i++) {
            String index = in.readString();
            List<ShardsSyncedFlushResult> shardsSyncedFlushResults = new ArrayList<>();
            int numShards = in.readInt();
            for (int j = 0; j < numShards; j++) {
                shardsSyncedFlushResults.add(ShardsSyncedFlushResult.readShardsSyncedFlushResult(in));
            }
            tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults);
        }
        shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        shardCounts.writeTo(out);
        out.writeInt(shardsResultPerIndex.size());
        for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : shardsResultPerIndex.entrySet()) {
            out.writeString(entry.getKey());
            out.writeInt(entry.getValue().size());
            for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) {
                shardsSyncedFlushResult.writeTo(out);
            }
        }
    }
}
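The serialization added above follows the codebase's `Streamable` convention: `writeTo` and `readFrom` must mirror each other field for field, with every collection length-prefixed before its elements. The same pattern on a hypothetical payload, as a minimal sketch:

---------------------------------------------------------------------------
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

// Hypothetical payload illustrating the mirrored read/write convention
// used by SyncedFlushResponse above.
final class Sample implements Streamable {
    final Map<String, Integer> countsPerIndex = new HashMap<>();

    @Override
    public void readFrom(StreamInput in) throws IOException {
        int size = in.readInt();              // length prefix first
        for (int i = 0; i < size; i++) {
            countsPerIndex.put(in.readString(), in.readInt());
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeInt(countsPerIndex.size());  // same order as readFrom
        for (Map.Entry<String, Integer> entry : countsPerIndex.entrySet()) {
            out.writeString(entry.getKey());
            out.writeInt(entry.getValue());
        }
    }
}
---------------------------------------------------------------------------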
@@ -0,0 +1,52 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

/**
 * Synced flush Action.
 */
public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlushRequest, SyncedFlushResponse> {

    SyncedFlushService syncedFlushService;

    @Inject
    public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool,
                                      TransportService transportService, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver,
                                      SyncedFlushService syncedFlushService) {
        super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SyncedFlushRequest::new);
        this.syncedFlushService = syncedFlushService;
    }

    @Override
    protected void doExecute(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
        syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener);
    }
}
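For the new transport action to be reachable it still needs to be registered with the action module; that hunk is not part of this excerpt. A hedged sketch of the likely registration, following the usual `ActionModule` convention in this codebase:

---------------------------------------------------------------------------
// Sketch only: hypothetical registration inside ActionModule#configure,
// not shown in this excerpt.
registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
---------------------------------------------------------------------------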
@@ -21,10 +21,7 @@ package org.elasticsearch.action.admin.indices.forcemerge;

import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.List;

/**

@@ -20,7 +20,6 @@
package org.elasticsearch.action.admin.indices.get;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;

@@ -171,7 +171,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
        for (String field : request.fields()) {
            if (Regex.isMatchAllPattern(field)) {
                for (FieldMapper fieldMapper : allFieldMappers) {
                    addFieldMapper(fieldMapper.fieldType().names().fullName(), fieldMapper, fieldMappings, request.includeDefaults());
                    addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
                }
            } else if (Regex.isSimpleMatchPattern(field)) {
                // go through the field mappers 3 times, to make sure we give preference to the resolve order: full name, index name, name.
@@ -179,15 +179,15 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
                Collection<FieldMapper> remainingFieldMappers = newLinkedList(allFieldMappers);
                for (Iterator<FieldMapper> it = remainingFieldMappers.iterator(); it.hasNext(); ) {
                    final FieldMapper fieldMapper = it.next();
                    if (Regex.simpleMatch(field, fieldMapper.fieldType().names().fullName())) {
                        addFieldMapper(fieldMapper.fieldType().names().fullName(), fieldMapper, fieldMappings, request.includeDefaults());
                    if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) {
                        addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
                        it.remove();
                    }
                }
                for (Iterator<FieldMapper> it = remainingFieldMappers.iterator(); it.hasNext(); ) {
                    final FieldMapper fieldMapper = it.next();
                    if (Regex.simpleMatch(field, fieldMapper.fieldType().names().indexName())) {
                        addFieldMapper(fieldMapper.fieldType().names().indexName(), fieldMapper, fieldMappings, request.includeDefaults());
                    if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) {
                        addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
                        it.remove();
                    }
                }
@@ -214,7 +214,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
                builder.startObject();
                fieldMapper.toXContent(builder, includeDefaults ? includeDefaultsParams : ToXContent.EMPTY_PARAMS);
                builder.endObject();
                fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().names().fullName(), builder.bytes()));
                fieldMappings.put(field, new FieldMappingMetaData(fieldMapper.fieldType().name(), builder.bytes()));
            } catch (IOException e) {
                throw new ElasticsearchException("failed to serialize XContent of field [" + field + "]", e);
            }
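A side effect of the mechanical rename in the last two hunks: both wildcard passes now test the same `fieldType().name()` value, so the second loop can never match anything the first did not already remove. A hedged sketch of the equivalent single pass (names as in the diff):

---------------------------------------------------------------------------
// Sketch: the two passes collapse into one once both test fieldType().name().
for (Iterator<FieldMapper> it = remainingFieldMappers.iterator(); it.hasNext(); ) {
    final FieldMapper fieldMapper = it.next();
    if (Regex.simpleMatch(field, fieldMapper.fieldType().name())) {
        addFieldMapper(fieldMapper.fieldType().name(), fieldMapper, fieldMappings, request.includeDefaults());
        it.remove(); // consume so no later pass re-adds it
    }
}
---------------------------------------------------------------------------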
@@ -32,12 +32,10 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Arrays;

/**
 * Open index action
 */
@@ -49,7 +47,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
    @Inject
    public TransportOpenIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                    ThreadPool threadPool, MetaDataIndexStateService indexStateService,
                                    NodeSettingsService nodeSettingsService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                    ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                    DestructiveOperations destructiveOperations) {
        super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, OpenIndexRequest::new);
        this.indexStateService = indexStateService;
@@ -68,9 +66,9 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
    }

    @Override
    protected void doExecute(OpenIndexRequest request, ActionListener<OpenIndexResponse> listener) {
    protected void doExecute(Task task, OpenIndexRequest request, ActionListener<OpenIndexResponse> listener) {
        destructiveOperations.failDestructive(request.indices());
        super.doExecute(request, listener);
        super.doExecute(task, request, listener);
    }

    @Override

@@ -35,8 +35,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Arrays;

/**
 *
 */

@@ -20,9 +20,9 @@ package org.elasticsearch.action.admin.indices.shards;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -40,7 +40,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus.*;
import static org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus.readStoreStatus;

/**
 * Response for {@link IndicesShardStoresAction}
@@ -56,13 +56,14 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
    public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> {
        private DiscoveryNode node;
        private long version;
        private String allocationId;
        private Throwable storeException;
        private Allocation allocation;
        private AllocationStatus allocationStatus;

        /**
         * The status of the shard store with respect to the cluster
         */
        public enum Allocation {
        public enum AllocationStatus {

            /**
             * Allocated as primary
@@ -81,16 +82,16 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon

            private final byte id;

            Allocation(byte id) {
            AllocationStatus(byte id) {
                this.id = id;
            }

            private static Allocation fromId(byte id) {
            private static AllocationStatus fromId(byte id) {
                switch (id) {
                    case 0: return PRIMARY;
                    case 1: return REPLICA;
                    case 2: return UNUSED;
                    default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]");
                    default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]");
                }
            }

@@ -99,11 +100,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
                    case 0: return "primary";
                    case 1: return "replica";
                    case 2: return "unused";
                    default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]");
                    default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]");
                }
            }

            private static Allocation readFrom(StreamInput in) throws IOException {
            private static AllocationStatus readFrom(StreamInput in) throws IOException {
                return fromId(in.readByte());
            }

@@ -115,10 +116,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        private StoreStatus() {
        }

        public StoreStatus(DiscoveryNode node, long version, Allocation allocation, Throwable storeException) {
        public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
            this.node = node;
            this.version = version;
            this.allocation = allocation;
            this.allocationId = allocationId;
            this.allocationStatus = allocationStatus;
            this.storeException = storeException;
        }

@@ -130,13 +132,20 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        }

        /**
         * Version of the store, used to select the store that will be
         * used as a primary.
         * Version of the store
         */
        public long getVersion() {
            return version;
        }

        /**
         * Allocation id of the store, used to select the store that will be
         * used as a primary.
         */
        public String getAllocationId() {
            return allocationId;
        }

        /**
         * Exception while trying to open the
         * shard index or from when the shard failed
@@ -146,13 +155,13 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        }

        /**
         * The allocation status of the store.
         * {@link Allocation#PRIMARY} indicates a primary shard copy
         * {@link Allocation#REPLICA} indicates a replica shard copy
         * {@link Allocation#UNUSED} indicates an unused shard copy
         * The allocation status of the store.
         * {@link AllocationStatus#PRIMARY} indicates a primary shard copy
         * {@link AllocationStatus#REPLICA} indicates a replica shard copy
         * {@link AllocationStatus#UNUSED} indicates an unused shard copy
         */
        public Allocation getAllocation() {
            return allocation;
        public AllocationStatus getAllocationStatus() {
            return allocationStatus;
        }

        static StoreStatus readStoreStatus(StreamInput in) throws IOException {
@@ -165,7 +174,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        public void readFrom(StreamInput in) throws IOException {
            node = DiscoveryNode.readNode(in);
            version = in.readLong();
            allocation = Allocation.readFrom(in);
            allocationId = in.readOptionalString();
            allocationStatus = AllocationStatus.readFrom(in);
            if (in.readBoolean()) {
                storeException = in.readThrowable();
            }
@@ -175,7 +185,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        public void writeTo(StreamOutput out) throws IOException {
            node.writeTo(out);
            out.writeLong(version);
            allocation.writeTo(out);
            out.writeOptionalString(allocationId);
            allocationStatus.writeTo(out);
            if (storeException != null) {
                out.writeBoolean(true);
                out.writeThrowable(storeException);
@@ -188,7 +199,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            node.toXContent(builder, params);
            builder.field(Fields.VERSION, version);
            builder.field(Fields.ALLOCATED, allocation.value());
            builder.field(Fields.ALLOCATION_ID, allocationId);
            builder.field(Fields.ALLOCATED, allocationStatus.value());
            if (storeException != null) {
                builder.startObject(Fields.STORE_EXCEPTION);
                ElasticsearchException.toXContent(builder, params, storeException);
@@ -206,7 +218,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
            } else {
                int compare = Long.compare(other.version, version);
                if (compare == 0) {
                    return Integer.compare(allocation.id, other.allocation.id);
                    return Integer.compare(allocationStatus.id, other.allocationStatus.id);
                }
                return compare;
            }
@@ -379,6 +391,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        static final XContentBuilderString STORES = new XContentBuilderString("stores");
        // StoreStatus fields
        static final XContentBuilderString VERSION = new XContentBuilderString("version");
        static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id");
        static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception");
        static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation");
    }
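With `allocationId` added to the wire format and XContent above, a consumer can distinguish shard copies by both allocation status and allocation id. A hedged sketch of iterating store statuses, using only accessors visible in this diff (obtaining `storeStatuses` from the response is assumed):

---------------------------------------------------------------------------
for (IndicesShardStoresResponse.StoreStatus status : storeStatuses) {
    if (status.getAllocationStatus() == IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY) {
        // may be null for copies reported by nodes that predate allocation ids
        String allocationId = status.getAllocationId();
        System.out.println(status.getNode() + " holds primary copy " + allocationId);
    }
}
---------------------------------------------------------------------------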
@ -21,14 +21,14 @@ package org.elasticsearch.action.admin.indices.shards;
|
|||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.FailedNodeException;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.cluster.health.ClusterShardHealth;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.cluster.health.ClusterShardHealth;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
|
@@ -179,8 +179,8 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
            }
            for (NodeGatewayStartedShards response : fetchResponse.responses) {
                if (shardExistsInNode(response)) {
                    IndicesShardStoresResponse.StoreStatus.Allocation allocation = getAllocation(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
                    storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), allocation, response.storeException()));
                    IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
                    storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException()));
                }
            }
            CollectionUtil.timSort(storeStatuses);
@@ -193,27 +193,27 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
            listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder)));
        }

        private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) {
        private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
            for (ShardRouting shardRouting : routingNodes.node(node.id())) {
                ShardId shardId = shardRouting.shardId();
                if (shardId.id() == shardID && shardId.getIndex().equals(index)) {
                    if (shardRouting.primary()) {
                        return IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY;
                        return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY;
                    } else if (shardRouting.assignedToNode()) {
                        return IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA;
                        return IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA;
                    } else {
                        return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED;
                        return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED;
                    }
                }
            }
            return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED;
            return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED;
        }

        /**
         * A shard exists/existed in a node only if shard state file exists in the node
         */
        private boolean shardExistsInNode(final NodeGatewayStartedShards response) {
            return response.storeException() != null || response.version() != -1;
            return response.storeException() != null || response.version() != -1 || response.allocationId() != null;
        }

        @Override
@@ -28,7 +28,6 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.engine.CommitStats;

import java.io.IOException;
import java.util.ArrayList;
@@ -28,7 +28,6 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardPath;

import java.io.IOException;
|
|||
package org.elasticsearch.action.admin.indices.warmer.get;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
|
|
@@ -34,11 +34,9 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -0,0 +1,203 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.bulk;

import org.elasticsearch.common.unit.TimeValue;

import java.util.Iterator;
import java.util.NoSuchElementException;

/**
 * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal
 * thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally.
 *
 * Notes for implementing custom subclasses:
 *
 * The underlying mathematical principle of <code>BackoffPolicy</code> is progressions which can be either finite or infinite although
 * the latter should not be used for retrying. A progression can be mapped to a <code>java.util.Iterator</code> with the following
 * semantics:
 *
 * <ul>
 *     <li><code>#hasNext()</code> determines whether the progression has more elements. Return <code>true</code> for infinite progressions</li>
 *     <li><code>#next()</code> determines the next element in the progression, i.e. the next wait time period</li>
 * </ul>
 *
 * Note that backoff policies are exposed as <code>Iterables</code> in order to be consumed multiple times.
 */
public abstract class BackoffPolicy implements Iterable<TimeValue> {
    private static final BackoffPolicy NO_BACKOFF = new NoBackoff();

    /**
     * Creates a backoff policy that will not allow any backoff, i.e. an operation will fail after the first attempt.
     *
     * @return A backoff policy without any backoff period. The returned instance is thread safe.
     */
    public static BackoffPolicy noBackoff() {
        return NO_BACKOFF;
    }

    /**
     * Creates a new constant backoff policy with the provided configuration.
     *
     * @param delay              The delay defines how long to wait between retry attempts. Must not be null.
     *                           Must be <= <code>Integer.MAX_VALUE</code> ms.
     * @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number.
     * @return A backoff policy with a constant wait time between retries. The returned instance is thread safe but each
     * iterator created from it should only be used by a single thread.
     */
    public static BackoffPolicy constantBackoff(TimeValue delay, int maxNumberOfRetries) {
        return new ConstantBackoff(checkDelay(delay), maxNumberOfRetries);
    }

    /**
     * Creates a new exponential backoff policy with a default configuration of 50 ms initial wait period and 8 retries taking
     * roughly 5.1 seconds in total.
     *
     * @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each
     * iterator created from it should only be used by a single thread.
     */
    public static BackoffPolicy exponentialBackoff() {
        return exponentialBackoff(TimeValue.timeValueMillis(50), 8);
    }

    /**
     * Creates a new exponential backoff policy with the provided configuration.
     *
     * @param initialDelay       The initial delay defines how long to wait for the first retry attempt. Must not be null.
     *                           Must be <= <code>Integer.MAX_VALUE</code> ms.
     * @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number.
     * @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each
     * iterator created from it should only be used by a single thread.
     */
    public static BackoffPolicy exponentialBackoff(TimeValue initialDelay, int maxNumberOfRetries) {
        return new ExponentialBackoff((int) checkDelay(initialDelay).millis(), maxNumberOfRetries);
    }

    private static TimeValue checkDelay(TimeValue delay) {
        if (delay.millis() > Integer.MAX_VALUE) {
            throw new IllegalArgumentException("delay must be <= " + Integer.MAX_VALUE + " ms");
        }
        return delay;
    }

    private static class NoBackoff extends BackoffPolicy {
        @Override
        public Iterator<TimeValue> iterator() {
            return new Iterator<TimeValue>() {
                @Override
                public boolean hasNext() {
                    return false;
                }

                @Override
                public TimeValue next() {
                    throw new NoSuchElementException("No backoff");
                }
            };
        }
    }

    private static class ExponentialBackoff extends BackoffPolicy {
        private final int start;

        private final int numberOfElements;

        private ExponentialBackoff(int start, int numberOfElements) {
            assert start >= 0;
            assert numberOfElements >= 0;
            this.start = start;
            this.numberOfElements = numberOfElements;
        }

        @Override
        public Iterator<TimeValue> iterator() {
            return new ExponentialBackoffIterator(start, numberOfElements);
        }
    }

    private static class ExponentialBackoffIterator implements Iterator<TimeValue> {
        private final int numberOfElements;

        private final int start;

        private int currentlyConsumed;

        private ExponentialBackoffIterator(int start, int numberOfElements) {
            this.start = start;
            this.numberOfElements = numberOfElements;
        }

        @Override
        public boolean hasNext() {
            return currentlyConsumed < numberOfElements;
        }

        @Override
        public TimeValue next() {
            if (!hasNext()) {
                throw new NoSuchElementException("Only up to " + numberOfElements + " elements");
            }
            int result = start + 10 * ((int) Math.exp(0.8d * (currentlyConsumed)) - 1);
            currentlyConsumed++;
            return TimeValue.timeValueMillis(result);
        }
    }

    private static final class ConstantBackoff extends BackoffPolicy {
        private final TimeValue delay;

        private final int numberOfElements;

        public ConstantBackoff(TimeValue delay, int numberOfElements) {
            assert numberOfElements >= 0;
            this.delay = delay;
            this.numberOfElements = numberOfElements;
        }

        @Override
        public Iterator<TimeValue> iterator() {
            return new ConstantBackoffIterator(delay, numberOfElements);
        }
    }

    private static final class ConstantBackoffIterator implements Iterator<TimeValue> {
        private final TimeValue delay;
        private final int numberOfElements;
        private int curr;

        public ConstantBackoffIterator(TimeValue delay, int numberOfElements) {
            this.delay = delay;
            this.numberOfElements = numberOfElements;
        }

        @Override
        public boolean hasNext() {
            return curr < numberOfElements;
        }

        @Override
        public TimeValue next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }
            curr++;
            return delay;
        }
    }
}
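For reference, with the default start of 50 ms the formula in ExponentialBackoffIterator#next yields 50, 60, 80, 150, 280, 580, 1250 and 2740 ms, which sums to the "roughly 5.1 seconds" the javadoc mentions. A minimal consumption sketch, assuming only the classes in this new file (the wrapping method is illustrative):

    // Minimal sketch: consuming a BackoffPolicy directly.
    // Each iterator owns its own progression, so keep it on a single thread.
    static void waitWithBackoff() throws InterruptedException {
        BackoffPolicy policy = BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(50), 8);
        for (TimeValue delay : policy) {
            // yields 50, 60, 80, 150, 280, 580, 1250, 2740 ms with these defaults
            Thread.sleep(delay.millis()); // back off, then retry the rejected operation
        }
    }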
@@ -19,7 +19,6 @@

package org.elasticsearch.action.bulk;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.delete.DeleteRequest;
@@ -19,7 +19,6 @@

package org.elasticsearch.action.bulk;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;

@@ -33,7 +32,11 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.FutureUtils;

import java.io.Closeable;
import java.util.concurrent.*;
import java.util.Objects;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

/**
@@ -48,7 +51,7 @@ public class BulkProcessor implements Closeable {
    /**
     * A listener for the execution.
     */
    public static interface Listener {
    public interface Listener {

        /**
         * Callback before the bulk is executed.

@@ -62,6 +65,9 @@ public class BulkProcessor implements Closeable {

        /**
         * Callback after a failed execution of bulk request.
         *
         * Note that in case an instance of <code>InterruptedException</code> is passed, which means that request processing has been
         * cancelled externally, the thread's interruption status has been restored prior to calling this method.
         */
        void afterBulk(long executionId, BulkRequest request, Throwable failure);
    }

@@ -79,6 +85,7 @@ public class BulkProcessor implements Closeable {
        private int bulkActions = 1000;
        private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
        private TimeValue flushInterval = null;
        private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff();

        /**
         * Creates a builder of bulk processor with the client to use and the listener that will be used
@@ -136,54 +143,58 @@ public class BulkProcessor implements Closeable {
            return this;
        }

        /**
         * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally
         * in case they have failed due to resource constraints (i.e. a thread pool was full).
         *
         * The default is to back off exponentially.
         *
         * @see org.elasticsearch.action.bulk.BackoffPolicy#exponentialBackoff()
         */
        public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) {
            if (backoffPolicy == null) {
                throw new NullPointerException("'backoffPolicy' must not be null. To disable backoff, pass BackoffPolicy.noBackoff()");
            }
            this.backoffPolicy = backoffPolicy;
            return this;
        }

        /**
         * Builds a new bulk processor.
         */
        public BulkProcessor build() {
            return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
            return new BulkProcessor(client, backoffPolicy, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
        }
    }

    public static Builder builder(Client client, Listener listener) {
        if (client == null) {
            throw new NullPointerException("The client you specified while building a BulkProcessor is null");
        }
        Objects.requireNonNull(client, "client");
        Objects.requireNonNull(listener, "listener");

        return new Builder(client, listener);
    }

    private final Client client;
    private final Listener listener;

    private final String name;

    private final int concurrentRequests;
    private final int bulkActions;
    private final long bulkSize;
    private final TimeValue flushInterval;

    private final Semaphore semaphore;

    private final ScheduledThreadPoolExecutor scheduler;
    private final ScheduledFuture scheduledFuture;

    private final AtomicLong executionIdGen = new AtomicLong();

    private BulkRequest bulkRequest;
    private final BulkRequestHandler bulkRequestHandler;

    private volatile boolean closed = false;

    BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
        this.client = client;
        this.listener = listener;
        this.name = name;
        this.concurrentRequests = concurrentRequests;
    BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
        this.bulkActions = bulkActions;
        this.bulkSize = bulkSize.bytes();

        this.semaphore = new Semaphore(concurrentRequests);
        this.bulkRequest = new BulkRequest();
        this.bulkRequestHandler = (concurrentRequests == 0) ? BulkRequestHandler.syncHandler(client, backoffPolicy, listener) : BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests);

        this.flushInterval = flushInterval;
        if (flushInterval != null) {
            this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
            this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);

@@ -231,14 +242,7 @@ public class BulkProcessor implements Closeable {
        if (bulkRequest.numberOfActions() > 0) {
            execute();
        }
        if (this.concurrentRequests < 1) {
            return true;
        }
        if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
            semaphore.release(this.concurrentRequests);
            return true;
        }
        return false;
        return this.bulkRequestHandler.awaitClose(timeout, unit);
    }

    /**
@@ -308,58 +312,7 @@ public class BulkProcessor implements Closeable {
        final long executionId = executionIdGen.incrementAndGet();

        this.bulkRequest = new BulkRequest();

        if (concurrentRequests == 0) {
            // execute in a blocking fashion...
            boolean afterCalled = false;
            try {
                listener.beforeBulk(executionId, bulkRequest);
                BulkResponse bulkItemResponses = client.bulk(bulkRequest).actionGet();
                afterCalled = true;
                listener.afterBulk(executionId, bulkRequest, bulkItemResponses);
            } catch (Exception e) {
                if (!afterCalled) {
                    listener.afterBulk(executionId, bulkRequest, e);
                }
            }
        } else {
            boolean success = false;
            boolean acquired = false;
            try {
                listener.beforeBulk(executionId, bulkRequest);
                semaphore.acquire();
                acquired = true;
                client.bulk(bulkRequest, new ActionListener<BulkResponse>() {
                    @Override
                    public void onResponse(BulkResponse response) {
                        try {
                            listener.afterBulk(executionId, bulkRequest, response);
                        } finally {
                            semaphore.release();
                        }
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        try {
                            listener.afterBulk(executionId, bulkRequest, e);
                        } finally {
                            semaphore.release();
                        }
                    }
                });
                success = true;
            } catch (InterruptedException e) {
                Thread.interrupted();
                listener.afterBulk(executionId, bulkRequest, e);
            } catch (Throwable t) {
                listener.afterBulk(executionId, bulkRequest, t);
            } finally {
                if (!success && acquired) { // if we fail on client.bulk() release the semaphore
                    semaphore.release();
                }
            }
        }
        this.bulkRequestHandler.execute(bulkRequest, executionId);
    }

    private boolean isOverTheLimit() {
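A sketch of how a caller would opt into the new knob (the listener bodies and values are illustrative; the builder defaults to BackoffPolicy.exponentialBackoff()):

    // Illustrative sketch: wiring a custom backoff policy through the new builder hook.
    BulkProcessor processor = BulkProcessor.builder(client, new BulkProcessor.Listener() {
        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            // for an InterruptedException the interrupt flag is already restored (see javadoc above)
        }
    })
            .setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(100), 3))
            .build();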
@@ -0,0 +1,166 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.bulk;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

/**
 * Abstracts the low-level details of bulk request handling
 */
abstract class BulkRequestHandler {
    protected final ESLogger logger;
    protected final Client client;

    protected BulkRequestHandler(Client client) {
        this.client = client;
        this.logger = Loggers.getLogger(getClass(), client.settings());
    }

    public abstract void execute(BulkRequest bulkRequest, long executionId);

    public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException;

    public static BulkRequestHandler syncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) {
        return new SyncBulkRequestHandler(client, backoffPolicy, listener);
    }

    public static BulkRequestHandler asyncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) {
        return new AsyncBulkRequestHandler(client, backoffPolicy, listener, concurrentRequests);
    }

    private static class SyncBulkRequestHandler extends BulkRequestHandler {
        private final BulkProcessor.Listener listener;
        private final BackoffPolicy backoffPolicy;

        public SyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) {
            super(client);
            this.backoffPolicy = backoffPolicy;
            this.listener = listener;
        }

        @Override
        public void execute(BulkRequest bulkRequest, long executionId) {
            boolean afterCalled = false;
            try {
                listener.beforeBulk(executionId, bulkRequest);
                BulkResponse bulkResponse = Retry
                        .on(EsRejectedExecutionException.class)
                        .policy(backoffPolicy)
                        .withSyncBackoff(client, bulkRequest);
                afterCalled = true;
                listener.afterBulk(executionId, bulkRequest, bulkResponse);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                logger.info("Bulk request {} has been cancelled.", e, executionId);
                if (!afterCalled) {
                    listener.afterBulk(executionId, bulkRequest, e);
                }
            } catch (Throwable t) {
                logger.warn("Failed to execute bulk request {}.", t, executionId);
                if (!afterCalled) {
                    listener.afterBulk(executionId, bulkRequest, t);
                }
            }
        }

        @Override
        public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
            // we are "closed" immediately as there is no request in flight
            return true;
        }
    }

    private static class AsyncBulkRequestHandler extends BulkRequestHandler {
        private final BackoffPolicy backoffPolicy;
        private final BulkProcessor.Listener listener;
        private final Semaphore semaphore;
        private final int concurrentRequests;

        private AsyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) {
            super(client);
            this.backoffPolicy = backoffPolicy;
            assert concurrentRequests > 0;
            this.listener = listener;
            this.concurrentRequests = concurrentRequests;
            this.semaphore = new Semaphore(concurrentRequests);
        }

        @Override
        public void execute(BulkRequest bulkRequest, long executionId) {
            boolean bulkRequestSetupSuccessful = false;
            boolean acquired = false;
            try {
                listener.beforeBulk(executionId, bulkRequest);
                semaphore.acquire();
                acquired = true;
                Retry.on(EsRejectedExecutionException.class)
                        .policy(backoffPolicy)
                        .withAsyncBackoff(client, bulkRequest, new ActionListener<BulkResponse>() {
                            @Override
                            public void onResponse(BulkResponse response) {
                                try {
                                    listener.afterBulk(executionId, bulkRequest, response);
                                } finally {
                                    semaphore.release();
                                }
                            }

                            @Override
                            public void onFailure(Throwable e) {
                                try {
                                    listener.afterBulk(executionId, bulkRequest, e);
                                } finally {
                                    semaphore.release();
                                }
                            }
                        });
                bulkRequestSetupSuccessful = true;
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                logger.info("Bulk request {} has been cancelled.", e, executionId);
                listener.afterBulk(executionId, bulkRequest, e);
            } catch (Throwable t) {
                logger.warn("Failed to execute bulk request {}.", t, executionId);
                listener.afterBulk(executionId, bulkRequest, t);
            } finally {
                if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore
                    semaphore.release();
                }
            }
        }

        @Override
        public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
            if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
                semaphore.release(this.concurrentRequests);
                return true;
            }
            return false;
        }
    }
}
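The handler split mirrors the two modes BulkProcessor already had: with concurrentRequests == 0 the sync handler retries on the flushing thread, otherwise the async handler bounds in-flight bulks with a semaphore, and awaitClose only has to wait in the async case (the sync handler is trivially closed). Restating the dispatch as wired in the BulkProcessor constructor above:

    // Zero concurrent requests selects the blocking handler,
    // anything else the semaphore-bounded asynchronous handler.
    BulkRequestHandler handler = (concurrentRequests == 0)
            ? BulkRequestHandler.syncHandler(client, backoffPolicy, listener)
            : BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests);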
@@ -0,0 +1,237 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.bulk;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ScheduledFuture;
import java.util.function.Predicate;

/**
 * Encapsulates synchronous and asynchronous retry logic.
 */
class Retry {
    private final Class<? extends Throwable> retryOnThrowable;

    private BackoffPolicy backoffPolicy;

    public static Retry on(Class<? extends Throwable> retryOnThrowable) {
        return new Retry(retryOnThrowable);
    }

    /**
     * @param backoffPolicy The backoff policy that defines how long and how often to wait for retries.
     */
    public Retry policy(BackoffPolicy backoffPolicy) {
        this.backoffPolicy = backoffPolicy;
        return this;
    }

    Retry(Class<? extends Throwable> retryOnThrowable) {
        this.retryOnThrowable = retryOnThrowable;
    }

    /**
     * Invokes #bulk(BulkRequest, ActionListener) on the provided client. Backs off on the provided exception and delegates results to the
     * provided listener.
     *
     * @param client      Client invoking the bulk request.
     * @param bulkRequest The bulk request that should be executed.
     * @param listener    A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not
     */
    public void withAsyncBackoff(Client client, BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
        AsyncRetryHandler r = new AsyncRetryHandler(retryOnThrowable, backoffPolicy, client, listener);
        r.execute(bulkRequest);
    }

    /**
     * Invokes #bulk(BulkRequest) on the provided client. Backs off on the provided exception.
     *
     * @param client      Client invoking the bulk request.
     * @param bulkRequest The bulk request that should be executed.
     * @return the bulk response as returned by the client.
     * @throws Exception Any exception thrown by the callable.
     */
    public BulkResponse withSyncBackoff(Client client, BulkRequest bulkRequest) throws Exception {
        return SyncRetryHandler
                .create(retryOnThrowable, backoffPolicy, client)
                .executeBlocking(bulkRequest)
                .actionGet();
    }

    static class AbstractRetryHandler implements ActionListener<BulkResponse> {
        private final ESLogger logger;
        private final Client client;
        private final ActionListener<BulkResponse> listener;
        private final Iterator<TimeValue> backoff;
        private final Class<? extends Throwable> retryOnThrowable;
        // Access only when holding a client-side lock, see also #addResponses()
        private final List<BulkItemResponse> responses = new ArrayList<>();
        private final long startTimestampNanos;
        // needed to construct the next bulk request based on the response to the previous one
        // volatile as we're called from a scheduled thread
        private volatile BulkRequest currentBulkRequest;
        private volatile ScheduledFuture<?> scheduledRequestFuture;

        public AbstractRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener<BulkResponse> listener) {
            this.retryOnThrowable = retryOnThrowable;
            this.backoff = backoffPolicy.iterator();
            this.client = client;
            this.listener = listener;
            this.logger = Loggers.getLogger(getClass(), client.settings());
            // in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood
            this.startTimestampNanos = System.nanoTime();
        }

        @Override
        public void onResponse(BulkResponse bulkItemResponses) {
            if (!bulkItemResponses.hasFailures()) {
                // we're done here, include all responses
                addResponses(bulkItemResponses, (r -> true));
                finishHim();
            } else {
                if (canRetry(bulkItemResponses)) {
                    addResponses(bulkItemResponses, (r -> !r.isFailed()));
                    retry(createBulkRequestForRetry(bulkItemResponses));
                } else {
                    addResponses(bulkItemResponses, (r -> true));
                    finishHim();
                }
            }
        }

        @Override
        public void onFailure(Throwable e) {
            try {
                listener.onFailure(e);
            } finally {
                FutureUtils.cancel(scheduledRequestFuture);
            }
        }

        private void retry(BulkRequest bulkRequestForRetry) {
            assert backoff.hasNext();
            TimeValue next = backoff.next();
            logger.trace("Retry of bulk request scheduled in {} ms.", next.millis());
            scheduledRequestFuture = client.threadPool().schedule(next, ThreadPool.Names.SAME, (() -> this.execute(bulkRequestForRetry)));
        }

        private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) {
            BulkRequest requestToReissue = new BulkRequest();
            int index = 0;
            for (BulkItemResponse bulkItemResponse : bulkItemResponses.getItems()) {
                if (bulkItemResponse.isFailed()) {
                    requestToReissue.add(currentBulkRequest.requests().get(index));
                }
                index++;
            }
            return requestToReissue;
        }

        private boolean canRetry(BulkResponse bulkItemResponses) {
            if (!backoff.hasNext()) {
                return false;
            }
            for (BulkItemResponse bulkItemResponse : bulkItemResponses) {
                if (bulkItemResponse.isFailed()) {
                    Throwable cause = bulkItemResponse.getFailure().getCause();
                    Throwable rootCause = ExceptionsHelper.unwrapCause(cause);
                    if (!rootCause.getClass().equals(retryOnThrowable)) {
                        return false;
                    }
                }
            }
            return true;
        }

        private void finishHim() {
            try {
                listener.onResponse(getAccumulatedResponse());
            } finally {
                FutureUtils.cancel(scheduledRequestFuture);
            }
        }

        private void addResponses(BulkResponse response, Predicate<BulkItemResponse> filter) {
            for (BulkItemResponse bulkItemResponse : response) {
                if (filter.test(bulkItemResponse)) {
                    // Use client-side lock here to avoid visibility issues. This method may be called multiple times
                    // (based on how many retries we have to issue) and relying that the response handling code will be
                    // scheduled on the same thread is fragile.
                    synchronized (responses) {
                        responses.add(bulkItemResponse);
                    }
                }
            }
        }

        private BulkResponse getAccumulatedResponse() {
            BulkItemResponse[] itemResponses;
            synchronized (responses) {
                itemResponses = responses.toArray(new BulkItemResponse[1]);
            }
            long stopTimestamp = System.nanoTime();
            long totalLatencyMs = TimeValue.timeValueNanos(stopTimestamp - startTimestampNanos).millis();
            return new BulkResponse(itemResponses, totalLatencyMs);
        }

        public void execute(BulkRequest bulkRequest) {
            this.currentBulkRequest = bulkRequest;
            client.bulk(bulkRequest, this);
        }
    }

    static class AsyncRetryHandler extends AbstractRetryHandler {
        public AsyncRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener<BulkResponse> listener) {
            super(retryOnThrowable, backoffPolicy, client, listener);
        }
    }

    static class SyncRetryHandler extends AbstractRetryHandler {
        private final PlainActionFuture<BulkResponse> actionFuture;

        public static SyncRetryHandler create(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client) {
            PlainActionFuture<BulkResponse> actionFuture = PlainActionFuture.newFuture();
            return new SyncRetryHandler(retryOnThrowable, backoffPolicy, client, actionFuture);
        }

        public SyncRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, PlainActionFuture<BulkResponse> actionFuture) {
            super(retryOnThrowable, backoffPolicy, client, actionFuture);
            this.actionFuture = actionFuture;
        }

        public ActionFuture<BulkResponse> executeBlocking(BulkRequest bulkRequest) {
            super.execute(bulkRequest);
            return actionFuture;
        }
    }
}
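The two entry points give the same semantics with different blocking behaviour: only failed items whose unwrapped root cause matches the given exception class are re-sent after each backoff step, and the accumulated item responses come back as one BulkResponse. A usage sketch (client and bulkRequest assumed to exist):

    // Illustrative sketch: retry rejected bulk items, blocking the calling thread.
    BulkResponse response = Retry
            .on(EsRejectedExecutionException.class)
            .policy(BackoffPolicy.exponentialBackoff())
            .withSyncBackoff(client, bulkRequest);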
@@ -239,7 +239,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            }
        } else {
            concreteIndices.resolveIfAbsent(req);
            req.routing(clusterState.metaData().resolveIndexRouting(req.routing(), req.index()));
            req.routing(clusterState.metaData().resolveIndexRouting(req.parent(), req.routing(), req.index()));
        }
    }
}
@@ -50,6 +50,8 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
    private String id;
    @Nullable
    private String routing;
    @Nullable
    private String parent;
    private boolean refresh;
    private long version = Versions.MATCH_ANY;
    private VersionType versionType = VersionType.INTERNAL;

@@ -94,6 +96,7 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
        this.type = request.type();
        this.id = request.id();
        this.routing = request.routing();
        this.parent = request.parent();
        this.refresh = request.refresh();
        this.version = request.version();
        this.versionType = request.versionType();
@@ -155,13 +158,18 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
    }

    /**
     * Sets the parent id of this document. Will simply set the routing to this value, as it is only
     * used for routing with delete requests.
     * @return The parent for this request.
     */
    @Override
    public String parent() {
        return parent;
    }

    /**
     * Sets the parent id of this document.
     */
    public DeleteRequest parent(String parent) {
        if (routing == null) {
            routing = parent;
        }
        this.parent = parent;
        return this;
    }
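The net effect: parent still feeds routing for child documents, but it is now carried as its own field and an explicit routing value wins. A sketch with illustrative index, type, and id values:

    // Illustrative only: parent doubles as the routing key unless routing was set first.
    DeleteRequest implicit = new DeleteRequest("myindex", "child_type", "1").parent("p1");
    // implicit.routing() -> "p1", implicit.parent() -> "p1"

    DeleteRequest explicit = new DeleteRequest("myindex", "child_type", "2").routing("r1");
    explicit.parent("p1");
    // explicit.routing() -> "r1", explicit.parent() -> "p1"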
@@ -230,6 +238,7 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
        type = in.readString();
        id = in.readString();
        routing = in.readOptionalString();
        parent = in.readOptionalString();
        refresh = in.readBoolean();
        version = in.readLong();
        versionType = VersionType.fromValue(in.readByte());

@@ -241,6 +250,7 @@ public class DeleteRequest extends ReplicationRequest<DeleteRequest> implements
        out.writeString(type);
        out.writeString(id);
        out.writeOptionalString(routing());
        out.writeOptionalString(parent());
        out.writeBoolean(refresh);
        out.writeLong(version);
        out.writeByte(versionType.getValue());
@@ -95,7 +95,7 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ

    @Override
    protected void resolveRequest(final MetaData metaData, String concreteIndex, DeleteRequest request) {
        request.routing(metaData.resolveIndexRouting(request.routing(), request.index()));
        request.routing(metaData.resolveIndexRouting(request.parent(), request.routing(), request.index()));
        if (metaData.hasIndex(concreteIndex)) {
            // check if routing is required, if so, do a broadcast delete
            MappingMetaData mappingMd = metaData.index(concreteIndex).mappingOrDefault(request.type());
@@ -20,7 +20,6 @@
package org.elasticsearch.action.explain;

import org.apache.lucene.search.Explanation;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -48,7 +48,14 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;

public class TransportFieldStatsTransportAction extends TransportBroadcastAction<FieldStatsRequest, FieldStatsResponse, FieldStatsShardRequest, FieldStatsShardResponse> {
@@ -49,6 +49,7 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
    private String type;
    private String id;
    private String routing;
    private String parent;
    private String preference;

    private String[] fields;

@@ -77,6 +78,7 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
        this.type = getRequest.type;
        this.id = getRequest.id;
        this.routing = getRequest.routing;
        this.parent = getRequest.parent;
        this.preference = getRequest.preference;
        this.fields = getRequest.fields;
        this.fetchSourceContext = getRequest.fetchSourceContext;
@@ -153,13 +155,17 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
    }

    /**
     * Sets the parent id of this document. Will simply set the routing to this value, as it is only
     * used for routing with delete requests.
     * @return The parent for this request.
     */
    public String parent() {
        return parent;
    }

    /**
     * Sets the parent id of this document.
     */
    public GetRequest parent(String parent) {
        if (routing == null) {
            routing = parent;
        }
        this.parent = parent;
        return this;
    }
@@ -291,6 +297,7 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
        type = in.readString();
        id = in.readString();
        routing = in.readOptionalString();
        parent = in.readOptionalString();
        preference = in.readOptionalString();
        refresh = in.readBoolean();
        int size = in.readInt();

@@ -320,6 +327,7 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
        out.writeString(type);
        out.writeString(id);
        out.writeOptionalString(routing);
        out.writeOptionalString(parent);
        out.writeOptionalString(preference);

        out.writeBoolean(refresh);
@@ -20,13 +20,17 @@
package org.elasticsearch.action.get;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.*;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.RealtimeRequest;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

@@ -37,7 +41,11 @@ import org.elasticsearch.index.VersionType;
import org.elasticsearch.search.fetch.source.FetchSourceContext;

import java.io.IOException;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements Iterable<MultiGetRequest.Item>, CompositeIndicesRequest, RealtimeRequest {
@@ -49,6 +57,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
        private String type;
        private String id;
        private String routing;
        private String parent;
        private String[] fields;
        private long version = Versions.MATCH_ANY;
        private VersionType versionType = VersionType.INTERNAL;

@@ -116,12 +125,17 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
        }

        public Item parent(String parent) {
            if (routing == null) {
                this.routing = parent;
            }
            this.parent = parent;
            return this;
        }

        /**
         * @return The parent for this request.
         */
        public String parent() {
            return parent;
        }

        public Item fields(String... fields) {
            this.fields = fields;
            return this;
@@ -173,6 +187,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
            type = in.readOptionalString();
            id = in.readString();
            routing = in.readOptionalString();
            parent = in.readOptionalString();
            int size = in.readVInt();
            if (size > 0) {
                fields = new String[size];

@@ -192,6 +207,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
            out.writeOptionalString(type);
            out.writeString(id);
            out.writeOptionalString(routing);
            out.writeOptionalString(parent);
            if (fields == null) {
                out.writeVInt(0);
            } else {

@@ -221,6 +237,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
            if (!id.equals(item.id)) return false;
            if (!index.equals(item.index)) return false;
            if (routing != null ? !routing.equals(item.routing) : item.routing != null) return false;
            if (parent != null ? !parent.equals(item.parent) : item.parent != null) return false;
            if (type != null ? !type.equals(item.type) : item.type != null) return false;
            if (versionType != item.versionType) return false;

@@ -233,6 +250,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
            result = 31 * result + (type != null ? type.hashCode() : 0);
            result = 31 * result + id.hashCode();
            result = 31 * result + (routing != null ? routing.hashCode() : 0);
            result = 31 * result + (parent != null ? parent.hashCode() : 0);
            result = 31 * result + (fields != null ? Arrays.hashCode(fields) : 0);
            result = 31 * result + Long.hashCode(version);
            result = 31 * result + versionType.hashCode();
Some files were not shown because too many files have changed in this diff.