Merge branch 'master' into cleanup/transport_bulk

commit 248ac240ed
.gitignore (vendored): 5 changes

@@ -20,6 +20,11 @@ nbactions.xml
 .gradle/
 build/
 
+# gradle wrapper
+/gradle/
+gradlew
+gradlew.bat
+
 # maven stuff (to be removed when trunk becomes 4.x)
 *-execution-hints.log
 target/
@@ -120,7 +120,8 @@ Please follow these formatting guidelines:
 * The rest is left to Java coding standards
 * Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do.
 * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't make them and please send a PR against this document with instructions for your IDE if it doesn't contain them.
-  * Eclipse: Preferences->Java->Code Style->Organize Imports. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
+  * Eclipse: `Preferences->Java->Code Style->Organize Imports`. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
+  * IntelliJ: `Preferences->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value.
 * Don't worry too much about import order. Try not to change it but don't worry about fighting your IDE to stop it from doing so.
 
 To create a distribution from the source, simply run:
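To illustrate the wildcard-import rule in the hunk above (an editor's sketch, not part of the diff; the class names are just examples drawn from files touched by this commit):

```groovy
// Explicit imports like these pass the build:
import org.elasticsearch.cluster.routing.allocation.AllocationService
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders

// The wildcard form that the IDE settings above prevent would fail the build:
// import org.elasticsearch.cluster.routing.allocation.*
```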
@@ -4,4 +4,4 @@ test -> test
 verify -> check
 verify -Dskip.unit.tests -> integTest
 package -DskipTests -> assemble
-install -DskipTests -> install
+install -DskipTests -> publishToMavenLocal
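For example, where a Maven workflow ran `mvn verify`, the Gradle equivalent in this table is `gradle check`; after this change, `mvn install -DskipTests` maps to `gradle publishToMavenLocal`.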
@@ -364,10 +364,12 @@ These are the linux flavors the Vagrantfile currently supports:
 * ubuntu-1204 aka precise
 * ubuntu-1404 aka trusty
 * ubuntu-1504 aka vivid
+* ubuntu-1604 aka xenial
 * debian-8 aka jessie, the current debian stable distribution
 * centos-6
 * centos-7
-* fedora-22
+* fedora-24
+* oel-6 aka Oracle Enterprise Linux 6
 * oel-7 aka Oracle Enterprise Linux 7
 * sles-12
 * opensuse-13
@@ -376,7 +378,6 @@ We're missing the following from the support matrix because there aren't high
 quality boxes available in vagrant atlas:
 
 * sles-11
-* oel-6
 
 We're missing the follow because our tests are very linux/bash centric:
Vagrantfile (vendored): 11 changes

@@ -37,6 +37,13 @@ Vagrant.configure(2) do |config|
       [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
     SHELL
   end
+  config.vm.define "ubuntu-1604" do |config|
+    config.vm.box = "elastic/ubuntu-16.04-x86_64"
+    ubuntu_common config, extra: <<-SHELL
+      # Install Jayatana so we can work around it being present.
+      [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
+    SHELL
+  end
   # Wheezy's backports don't contain Openjdk 8 and the backflips required to
   # get the sun jdk on there just aren't worth it. We have jessie for testing
   # debian and it works fine.
@@ -78,8 +85,8 @@ Vagrant.configure(2) do |config|
   config.vm.synced_folder ".", "/vagrant", disabled: true
   config.vm.synced_folder ".", "/elasticsearch"
   config.vm.provider "virtualbox" do |v|
-    # Give the boxes 2GB so they can run our tests if they have to.
-    v.memory = 2048
+    # Give the boxes 3GB because Elasticsearch defaults to using 2GB
+    v.memory = 3072
   end
   if Vagrant.has_plugin?("vagrant-cachier")
     config.cache.scope = :box
@@ -57,6 +57,9 @@ dependencies {
 }
 
 compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
+// enable the JMH's BenchmarkProcessor to generate the final benchmark classes
+// needs to be added separately otherwise Gradle will quote it and javac will fail
+compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"])
compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
 
 forbiddenApis {
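A sketch of why the processor is registered with `addAll` rather than the usual `<<` append (this restates the comment in the hunk above; the snippet is illustrative, not taken from the repo):

```groovy
// Two separate strings reach javac as two arguments: -processor <class>
compileJava.options.compilerArgs.addAll(["-processor", "org.openjdk.jmh.generators.BenchmarkProcessor"])

// A single appended string would be passed, quoted, as ONE argument,
// "-processor org.openjdk.jmh.generators.BenchmarkProcessor",
// which javac rejects:
// compileJava.options.compilerArgs << "-processor org.openjdk.jmh.generators.BenchmarkProcessor"
```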
@@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.settings.Settings;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
@@ -160,11 +159,9 @@ public class AllocationBenchmark {
     public ClusterState measureAllocation() {
         ClusterState clusterState = initialClusterState;
         while (clusterState.getRoutingNodes().hasUnassignedShards()) {
-            RoutingAllocation.Result result = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes()
+            clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes()
                 .shardsWithState(ShardRoutingState.INITIALIZING));
-            clusterState = ClusterState.builder(clusterState).routingResult(result).build();
-            result = strategy.reroute(clusterState, "reroute");
-            clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+            clusterState = strategy.reroute(clusterState, "reroute");
         }
         return clusterState;
     }
@@ -22,10 +22,10 @@ import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.cluster.EmptyClusterInfoService;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
-import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
 import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@@ -35,9 +35,9 @@ import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.gateway.GatewayAllocator;
 
-import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
-import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -50,12 +50,12 @@ public final class Allocators {
         }
 
         @Override
-        public void applyStartedShards(StartedRerouteAllocation allocation) {
+        public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) {
             // noop
         }
 
         @Override
-        public void applyFailedShards(FailedRerouteAllocation allocation) {
+        public void applyFailedShards(RoutingAllocation allocation, List<FailedShard> failedShards) {
             // noop
         }
 
@@ -72,7 +72,7 @@ public final class Allocators {
 
     public static AllocationService createAllocationService(Settings settings) throws NoSuchMethodException, InstantiationException,
         IllegalAccessException, InvocationTargetException {
-        return createAllocationService(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings
+        return createAllocationService(settings, new ClusterSettings(Settings.EMPTY, ClusterSettings
             .BUILT_IN_CLUSTER_SETTINGS));
     }
 
@@ -85,19 +85,9 @@ public final class Allocators {
 
     public static AllocationDeciders defaultAllocationDeciders(Settings settings, ClusterSettings clusterSettings) throws
         IllegalAccessException, InvocationTargetException, InstantiationException, NoSuchMethodException {
-        List<AllocationDecider> list = new ArrayList<>();
-        // Keep a deterministic order of allocation deciders for the benchmark
-        for (Class<? extends AllocationDecider> deciderClass : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
-            try {
-                Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class, ClusterSettings
-                    .class);
-                list.add(constructor.newInstance(settings, clusterSettings));
-            } catch (NoSuchMethodException e) {
-                Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class);
-                list.add(constructor.newInstance(settings));
-            }
-        }
-        return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0]));
+        Collection<AllocationDecider> deciders =
+            ClusterModule.createAllocationDeciders(settings, clusterSettings, Collections.emptyList());
+        return new AllocationDeciders(settings, deciders);
     }
 
@@ -1,8 +0,0 @@
-# Do not log at all if it is not really critical - we're in a benchmark
-benchmarks.es.logger.level=ERROR
-log4j.rootLogger=${benchmarks.es.logger.level}, out
-
-log4j.appender.out=org.apache.log4j.ConsoleAppender
-log4j.appender.out.layout=org.apache.log4j.PatternLayout
-log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n
-

benchmarks/src/main/resources/log4j2.properties (new file): 8 changes

@@ -0,0 +1,8 @@
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+
+# Do not log at all if it is not really critical - we're in a benchmark
+rootLogger.level = error
+rootLogger.appenderRef.console.ref = console
build.gradle: 63 changes

@@ -17,7 +17,6 @@
  * under the License.
  */
 
-import com.bmuschko.gradle.nexus.NexusPlugin
 import org.eclipse.jgit.lib.Repository
 import org.eclipse.jgit.lib.RepositoryBuilder
 import org.gradle.plugins.ide.eclipse.model.SourceFolder
@@ -52,68 +51,6 @@ subprojects {
       }
     }
   }
-
-  plugins.withType(NexusPlugin).whenPluginAdded {
-    modifyPom {
-      project {
-        url 'https://github.com/elastic/elasticsearch'
-        inceptionYear '2009'
-
-        scm {
-          url 'https://github.com/elastic/elasticsearch'
-          connection 'scm:https://elastic@github.com/elastic/elasticsearch'
-          developerConnection 'scm:git://github.com/elastic/elasticsearch.git'
-        }
-
-        licenses {
-          license {
-            name 'The Apache Software License, Version 2.0'
-            url 'http://www.apache.org/licenses/LICENSE-2.0.txt'
-            distribution 'repo'
-          }
-        }
-      }
-    }
-    extraArchive {
-      javadoc = true
-      tests = false
-    }
-    nexus {
-      String buildSnapshot = System.getProperty('build.snapshot', 'true')
-      if (buildSnapshot == 'false') {
-        Repository repo = new RepositoryBuilder().findGitDir(project.rootDir).build()
-        String shortHash = repo.resolve('HEAD')?.name?.substring(0,7)
-        repositoryUrl = project.hasProperty('build.repository') ? project.property('build.repository') : "file://${System.getenv('HOME')}/elasticsearch-releases/${version}-${shortHash}/"
-      }
-    }
-    // we have our own username/password prompts so that they only happen once
-    // TODO: add gpg signing prompts, which is tricky, as the buildDeb/buildRpm tasks are executed before this code block
-    project.gradle.taskGraph.whenReady { taskGraph ->
-      if (taskGraph.allTasks.any { it.name == 'uploadArchives' }) {
-        Console console = System.console()
-        // no need for username/password on local deploy
-        if (project.nexus.repositoryUrl.startsWith('file://')) {
-          project.rootProject.allprojects.each {
-            it.ext.nexusUsername = 'foo'
-            it.ext.nexusPassword = 'bar'
-          }
-        } else {
-          if (project.hasProperty('nexusUsername') == false) {
-            String nexusUsername = console.readLine('\nNexus username: ')
-            project.rootProject.allprojects.each {
-              it.ext.nexusUsername = nexusUsername
-            }
-          }
-          if (project.hasProperty('nexusPassword') == false) {
-            String nexusPassword = new String(console.readPassword('\nNexus password: '))
-            project.rootProject.allprojects.each {
-              it.ext.nexusPassword = nexusPassword
-            }
-          }
-        }
-      }
-    }
-  }
 
 allprojects {
@@ -95,7 +95,6 @@ dependencies {
   compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
   compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
   compile 'de.thetaphi:forbiddenapis:2.2'
-  compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
   compile 'org.apache.rat:apache-rat:0.11'
   compile 'ru.vyarus:gradle-animalsniffer-plugin:1.0.1'
 }
@@ -109,10 +108,6 @@ if (project == rootProject) {
 
   repositories {
     mavenCentral()
-    maven {
-      name 'sonatype-snapshots'
-      url "https://oss.sonatype.org/content/repositories/snapshots/"
-    }
   }
   test.exclude 'org/elasticsearch/test/NamingConventionsCheckBadClasses*'
 }
@@ -28,11 +28,10 @@ import org.gradle.api.Task
 import org.gradle.api.XmlProvider
 import org.gradle.api.artifacts.Configuration
 import org.gradle.api.artifacts.ModuleDependency
-import org.gradle.api.artifacts.ModuleVersionIdentifier
 import org.gradle.api.artifacts.ProjectDependency
 import org.gradle.api.artifacts.ResolvedArtifact
 import org.gradle.api.artifacts.dsl.RepositoryHandler
-import org.gradle.api.artifacts.maven.MavenPom
+import org.gradle.api.plugins.JavaPlugin
 import org.gradle.api.publish.maven.MavenPublication
 import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
 import org.gradle.api.publish.maven.tasks.GenerateMavenPom
@@ -63,7 +62,6 @@ class BuildPlugin implements Plugin<Project> {
         project.pluginManager.apply('nebula.info-java')
         project.pluginManager.apply('nebula.info-scm')
         project.pluginManager.apply('nebula.info-jar')
-        project.pluginManager.apply('com.bmuschko.nexus')
         project.pluginManager.apply(ProvidedBasePlugin)
 
         globalBuildInfo(project)
@@ -71,6 +69,8 @@ class BuildPlugin implements Plugin<Project> {
         configureConfigurations(project)
         project.ext.versions = VersionProperties.versions
         configureCompile(project)
+        configureJavadocJar(project)
+        configureSourcesJar(project)
         configurePomGeneration(project)
 
         configureTest(project)
@@ -157,7 +157,7 @@ class BuildPlugin implements Plugin<Project> {
     private static String findJavaHome() {
         String javaHome = System.getenv('JAVA_HOME')
         if (javaHome == null) {
-            if (System.getProperty("idea.active") != null) {
+            if (System.getProperty("idea.active") != null || System.getProperty("eclipse.launcher") != null) {
                 // intellij doesn't set JAVA_HOME, so we use the jdk gradle was run with
                 javaHome = Jvm.current().javaHome
             } else {
@@ -267,11 +267,6 @@ class BuildPlugin implements Plugin<Project> {
         project.configurations.compile.dependencies.all(disableTransitiveDeps)
         project.configurations.testCompile.dependencies.all(disableTransitiveDeps)
         project.configurations.provided.dependencies.all(disableTransitiveDeps)
-
-        // add exclusions to the pom directly, for each of the transitive deps of this project's deps
-        project.modifyPom { MavenPom pom ->
-            pom.withXml(fixupDependencies(project))
-        }
     }
 
     /** Adds repositores used by ES dependencies */
@@ -284,10 +279,6 @@ class BuildPlugin implements Plugin<Project> {
             repos.mavenLocal()
         }
         repos.mavenCentral()
-        repos.maven {
-            name 'sonatype-snapshots'
-            url 'http://oss.sonatype.org/content/repositories/snapshots/'
-        }
         String luceneVersion = VersionProperties.lucene
         if (luceneVersion.contains('-snapshot')) {
             // extract the revision number from the version with a regex matcher
@@ -394,14 +385,20 @@ class BuildPlugin implements Plugin<Project> {
              * -serial because we don't use java serialization.
              */
            // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
-            options.compilerArgs << '-Werror' << '-proc:none' << '-Xlint:all,-path,-serial,-options,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing'
+            options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing'
+
+            // either disable annotation processor completely (default) or allow to enable them if an annotation processor is explicitly defined
+            if (options.compilerArgs.contains("-processor") == false) {
+                options.compilerArgs << '-proc:none'
+            }
 
             options.encoding = 'UTF-8'
             //options.incremental = true
 
             if (project.javaVersion == JavaVersion.VERSION_1_9) {
-                // hack until gradle supports java 9's new "-release" arg
+                // hack until gradle supports java 9's new "--release" arg
                 assert minimumJava == JavaVersion.VERSION_1_8
-                options.compilerArgs << '-release' << '8'
+                options.compilerArgs << '--release' << '8'
                 project.sourceCompatibility = null
                 project.targetCompatibility = null
             }
@@ -409,6 +406,25 @@ class BuildPlugin implements Plugin<Project> {
         }
     }
 
+    /** Adds a javadocJar task to generate a jar containing javadocs. */
+    static void configureJavadocJar(Project project) {
+        Jar javadocJarTask = project.task('javadocJar', type: Jar)
+        javadocJarTask.classifier = 'javadoc'
+        javadocJarTask.group = 'build'
+        javadocJarTask.description = 'Assembles a jar containing javadocs.'
+        javadocJarTask.from(project.tasks.getByName(JavaPlugin.JAVADOC_TASK_NAME))
+        project.assemble.dependsOn(javadocJarTask)
+    }
+
+    static void configureSourcesJar(Project project) {
+        Jar sourcesJarTask = project.task('sourcesJar', type: Jar)
+        sourcesJarTask.classifier = 'sources'
+        sourcesJarTask.group = 'build'
+        sourcesJarTask.description = 'Assembles a jar containing source files.'
+        sourcesJarTask.from(project.sourceSets.main.allSource)
+        project.assemble.dependsOn(sourcesJarTask)
+    }
+
     /** Adds additional manifest info to jars, and adds source and javadoc jars */
     static void configureJars(Project project) {
         project.tasks.withType(Jar) { Jar jarTask ->
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.gradle.doc
 
+import org.elasticsearch.gradle.VersionProperties
 import org.elasticsearch.gradle.test.RestTestPlugin
 import org.gradle.api.Project
 import org.gradle.api.Task
@@ -30,9 +31,19 @@ public class DocsTestPlugin extends RestTestPlugin {
     @Override
     public void apply(Project project) {
         super.apply(project)
+        Map<String, String> defaultSubstitutions = [
+            /* These match up with the asciidoc syntax for substitutions but
+             * the values may differ. In particular {version} needs to resolve
+             * to the version being built for testing but needs to resolve to
+             * the last released version for docs. */
+            '\\{version\\}':
+                VersionProperties.elasticsearch.replace('-SNAPSHOT', ''),
+            '\\{lucene_version\\}' : VersionProperties.lucene,
+        ]
         Task listSnippets = project.tasks.create('listSnippets', SnippetsTask)
         listSnippets.group 'Docs'
         listSnippets.description 'List each snippet'
+        listSnippets.defaultSubstitutions = defaultSubstitutions
         listSnippets.perSnippet { println(it.toString()) }
 
         Task listConsoleCandidates = project.tasks.create(
@@ -40,26 +51,15 @@ public class DocsTestPlugin extends RestTestPlugin {
         listConsoleCandidates.group 'Docs'
         listConsoleCandidates.description
             'List snippets that probably should be marked // CONSOLE'
+        listConsoleCandidates.defaultSubstitutions = defaultSubstitutions
         listConsoleCandidates.perSnippet {
-            if (
-                   it.console != null // Already marked, nothing to do
-                || it.testResponse    // It is a response
-            ) {
-                return
+            if (RestTestsFromSnippetsTask.isConsoleCandidate(it)) {
+                println(it.toString())
             }
-            List<String> languages = [
-                // This language should almost always be marked console
-                'js',
-                // These are often curl commands that should be converted but
-                // are probably false positives
-                'sh', 'shell',
-            ]
-            if (false == languages.contains(it.language)) {
-                return
-            }
-            println(it.toString())
         }
 
-        project.tasks.create('buildRestTests', RestTestsFromSnippetsTask)
+        Task buildRestTests = project.tasks.create(
+            'buildRestTests', RestTestsFromSnippetsTask)
+        buildRestTests.defaultSubstitutions = defaultSubstitutions
     }
 }
@@ -41,6 +41,16 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
     @Input
     Map<String, String> setups = new HashMap()
 
+    /**
+     * A list of files that contain snippets that *probably* should be
+     * converted to `// CONSOLE` but have yet to be converted. If a file is in
+     * this list and doesn't contain unconverted snippets this task will fail.
+     * If there are unconverted snippets not in this list then this task will
+     * fail. All files are paths relative to the docs dir.
+     */
+    @Input
+    List<String> expectedUnconvertedCandidates = []
+
     /**
      * Root directory of the tests being generated. To make rest tests happy
      * we generate them in a testRoot() which is contained in this directory.
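As a usage illustration of the new `@Input` above (the file names here are hypothetical, not from this commit), a docs build script would list the files still awaiting conversion:

```groovy
// Hypothetical docs/build.gradle fragment. Files listed here must still
// contain unconverted snippets; unlisted files with convertible snippets
// fail the build, ratcheting the docs toward full `// CONSOLE` coverage.
buildRestTests.expectedUnconvertedCandidates = [
    'reference/old-page.asciidoc',
    'plugins/another-old-page.asciidoc',
]
```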
@@ -56,6 +66,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
         TestBuilder builder = new TestBuilder()
         doFirst { outputRoot().delete() }
         perSnippet builder.&handleSnippet
+        doLast builder.&checkUnconverted
         doLast builder.&finishLastTest
     }
 
@@ -67,6 +78,27 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
         return new File(testRoot, '/rest-api-spec/test')
     }
 
+    /**
+     * Is this snippet a candidate for conversion to `// CONSOLE`?
+     */
+    static isConsoleCandidate(Snippet snippet) {
+        /* Snippets that are responses or already marked as `// CONSOLE` or
+         * `// NOTCONSOLE` are not candidates. */
+        if (snippet.console != null || snippet.testResponse) {
+            return false
+        }
+        /* js snippets almost always should be marked with `// CONSOLE`. js
+         * snippets that shouldn't be marked `// CONSOLE`, like examples for
+         * js client, should always be marked with `// NOTCONSOLE`.
+         *
+         * `sh` snippets that contain `curl` almost always should be marked
+         * with `// CONSOLE`. In the exceptionally rare cases where they are
+         * not communicating with Elasticsearch, like the examples in the ec2
+         * and gce discovery plugins, the snippets should be marked
+         * `// NOTCONSOLE`. */
+        return snippet.language == 'js' || snippet.curl
+    }
+
     private class TestBuilder {
         private static final String SYNTAX = {
             String method = /(?<method>GET|PUT|POST|HEAD|OPTIONS|DELETE)/
@@ -88,11 +120,22 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
          */
         PrintWriter current
 
+        /**
+         * Files containing all snippets that *probably* should be converted
+         * to `// CONSOLE` but have yet to be converted. All files are paths
+         * relative to the docs dir.
+         */
+        Set<String> unconvertedCandidates = new HashSet<>()
+
         /**
          * Called each time a snippet is encountered. Tracks the snippets and
         * calls buildTest to actually build the test.
         */
        void handleSnippet(Snippet snippet) {
+            if (RestTestsFromSnippetsTask.isConsoleCandidate(snippet)) {
+                unconvertedCandidates.add(snippet.path.toString()
+                    .replace('\\', '/'))
+            }
            if (BAD_LANGUAGES.contains(snippet.language)) {
                throw new InvalidUserDataException(
                        "$snippet: Use `js` instead of `${snippet.language}`.")
@@ -117,7 +160,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
 
             if (false == test.continued) {
                 current.println('---')
-                current.println("\"$test.start\":")
+                current.println("\"line_$test.start\":")
             }
             if (test.skipTest) {
                 current.println("  - skip:")
@@ -146,6 +189,14 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
         void emitDo(String method, String pathAndQuery, String body,
                 String catchPart, List warnings, boolean inSetup) {
             def (String path, String query) = pathAndQuery.tokenize('?')
+            if (path == null) {
+                path = '' // Catch requests to the root...
+            } else {
+                // Escape some characters that are also escaped by sense
+                path = path.replace('<', '%3C').replace('>', '%3E')
+                path = path.replace('{', '%7B').replace('}', '%7D')
+                path = path.replace('|', '%7C')
+            }
             current.println("  - do:")
             if (catchPart != null) {
                 current.println("      catch: $catchPart")
@@ -247,5 +298,35 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
                 current = null
             }
         }
+
+        void checkUnconverted() {
+            List<String> listedButNotFound = []
+            for (String listed : expectedUnconvertedCandidates) {
+                if (false == unconvertedCandidates.remove(listed)) {
+                    listedButNotFound.add(listed)
+                }
+            }
+            String message = ""
+            if (false == listedButNotFound.isEmpty()) {
+                Collections.sort(listedButNotFound)
+                listedButNotFound = listedButNotFound.collect {'    ' + it}
+                message += "Expected unconverted snippets but none found in:\n"
+                message += listedButNotFound.join("\n")
+            }
+            if (false == unconvertedCandidates.isEmpty()) {
+                List<String> foundButNotListed =
+                    new ArrayList<>(unconvertedCandidates)
+                Collections.sort(foundButNotListed)
+                foundButNotListed = foundButNotListed.collect {'    ' + it}
+                if (false == "".equals(message)) {
+                    message += "\n"
+                }
+                message += "Unexpected unconverted snippets:\n"
+                message += foundButNotListed.join("\n")
+            }
+            if (false == "".equals(message)) {
+                throw new InvalidUserDataException(message);
+            }
+        }
     }
 }
@@ -22,6 +22,7 @@ package org.elasticsearch.gradle.doc
 import org.gradle.api.DefaultTask
 import org.gradle.api.InvalidUserDataException
 import org.gradle.api.file.ConfigurableFileTree
+import org.gradle.api.tasks.Input
 import org.gradle.api.tasks.InputFiles
 import org.gradle.api.tasks.TaskAction
 
@@ -38,6 +39,7 @@ public class SnippetsTask extends DefaultTask {
     private static final String SKIP = /skip:([^\]]+)/
     private static final String SETUP = /setup:([^ \]]+)/
     private static final String WARNING = /warning:(.+)/
+    private static final String CAT = /(_cat)/
     private static final String TEST_SYNTAX =
         /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING) ?/
 
@@ -60,6 +62,12 @@ public class SnippetsTask extends DefaultTask {
         exclude 'build'
     }
 
+    /**
+     * Substitutions done on every snippet's contents.
+     */
+    @Input
+    Map<String, String> defaultSubstitutions = [:]
+
     @TaskAction
     public void executeTask() {
         /*
@@ -75,21 +83,39 @@ public class SnippetsTask extends DefaultTask {
             Closure emit = {
                 snippet.contents = contents.toString()
                 contents = null
+                Closure doSubstitution = { String pattern, String subst ->
+                    /*
+                     * $body is really common but it looks like a
+                     * backreference so we just escape it here to make the
+                     * tests cleaner.
+                     */
+                    subst = subst.replace('$body', '\\$body')
+                    // \n is a new line....
+                    subst = subst.replace('\\n', '\n')
+                    snippet.contents = snippet.contents.replaceAll(
+                        pattern, subst)
+                }
+                defaultSubstitutions.each doSubstitution
                 if (substitutions != null) {
-                    substitutions.each { String pattern, String subst ->
-                        /*
-                         * $body is really common but it looks like a
-                         * backreference so we just escape it here to make the
-                         * tests cleaner.
-                         */
-                        subst = subst.replace('$body', '\\$body')
-                        // \n is a new line....
-                        subst = subst.replace('\\n', '\n')
-                        snippet.contents = snippet.contents.replaceAll(
-                            pattern, subst)
-                    }
+                    substitutions.each doSubstitution
                     substitutions = null
                 }
+                if (snippet.language == null) {
+                    throw new InvalidUserDataException("$snippet: "
+                        + "Snippet missing a language. This is required by "
+                        + "Elasticsearch's doc testing infrastructure so we "
+                        + "be sure we don't accidentally forget to test a "
+                        + "snippet.")
+                }
+                // Try to detect snippets that contain `curl`
+                if (snippet.language == 'sh' || snippet.language == 'shell') {
+                    snippet.curl = snippet.contents.contains('curl')
+                    if (snippet.console == false && snippet.curl == false) {
+                        throw new InvalidUserDataException("$snippet: "
+                            + "No need for NOTCONSOLE if snippet doesn't "
+                            + "contain `curl`.")
+                    }
+                }
                 perSnippet(snippet)
                 snippet = null
             }
@@ -107,7 +133,7 @@ public class SnippetsTask extends DefaultTask {
                 }
                 return
             }
-            matcher = line =~ /\[source,(\w+)]\s*/
+            matcher = line =~ /\["?source"?,\s*"?(\w+)"?(,.*)?].*/
             if (matcher.matches()) {
                 lastLanguage = matcher.group(1)
                 lastLanguageLine = lineNumber
@@ -196,8 +222,17 @@ public class SnippetsTask extends DefaultTask {
                     substitutions = []
                 }
                 String loc = "$file:$lineNumber"
-                parse(loc, matcher.group(2), /$SUBSTITUTION ?/) {
-                    substitutions.add([it.group(1), it.group(2)])
+                parse(loc, matcher.group(2), /(?:$SUBSTITUTION|$CAT) ?/) {
+                    if (it.group(1) != null) {
+                        // TESTRESPONSE[s/adsf/jkl/]
+                        substitutions.add([it.group(1), it.group(2)])
+                    } else if (it.group(3) != null) {
+                        // TESTRESPONSE[_cat]
+                        substitutions.add(['^', '/'])
+                        substitutions.add(['\n$', '\\\\s*/'])
+                        substitutions.add(['( +)', '$1\\\\s+'])
+                        substitutions.add(['\n', '\\\\s*\n '])
+                    }
                 }
             }
             return
@@ -250,6 +285,7 @@ public class SnippetsTask extends DefaultTask {
         String language = null
         String catchPart = null
         String setup = null
+        boolean curl
         List warnings = new ArrayList()
 
         @Override
@@ -285,6 +321,9 @@ public class SnippetsTask extends DefaultTask {
             if (testSetup) {
                 result += '// TESTSETUP'
             }
+            if (curl) {
+                result += '(curl)'
+            }
             return result
         }
     }
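To see what the four `TESTRESPONSE[_cat]` substitutions in the hunk above produce, here is a small standalone Groovy sketch (illustrative only, not from the repo) applying them, in the same `replaceAll` style the task uses, to a made-up `_cat` response line:

```groovy
String contents = 'green open  test-index\n'
contents = contents.replaceAll('^', '/')            // anchor the match with a leading /
contents = contents.replaceAll('\n$', '\\\\s*/')    // trailing newline -> optional whitespace + closing /
contents = contents.replaceAll('( +)', '$1\\\\s+')  // runs of spaces tolerate extra whitespace
contents = contents.replaceAll('\n', '\\\\s*\n ')   // no internal newlines left here, so a no-op
assert contents == '/green \\s+open  \\s+test-index\\s*/'
```

The result is a lenient regex the REST test framework can match against real `_cat` output, whose column widths vary.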
@@ -56,12 +56,8 @@ public class PluginBuildPlugin extends BuildPlugin {
                 // for plugins which work with the transport client, we copy the jar
                 // file to a new name, copy the nebula generated pom to the same name,
                 // and generate a different pom for the zip
-                project.signArchives.enabled = false
                 addClientJarPomGeneration(project)
                 addClientJarTask(project)
-                if (isModule == false) {
-                    addZipPomGeneration(project)
-                }
             } else {
                 // no client plugin, so use the pom file from nebula, without jar, for the zip
                 project.ext.set("nebulaPublish.maven.jar", false)
@@ -97,8 +93,8 @@ public class PluginBuildPlugin extends BuildPlugin {
             // with a full elasticsearch server that includes optional deps
             provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
             provided "com.vividsolutions:jts:${project.versions.jts}"
-            provided "log4j:log4j:${project.versions.log4j}"
-            provided "log4j:apache-log4j-extras:${project.versions.log4j}"
+            provided "org.apache.logging.log4j:log4j-api:${project.versions.log4j}"
+            provided "org.apache.logging.log4j:log4j-core:${project.versions.log4j}"
             provided "net.java.dev.jna:jna:${project.versions.jna}"
         }
     }
@@ -152,7 +148,7 @@ public class PluginBuildPlugin extends BuildPlugin {
     /** Adds a task to move jar and associated files to a "-client" name. */
     protected static void addClientJarTask(Project project) {
         Task clientJar = project.tasks.create('clientJar')
-        clientJar.dependsOn('generatePomFileForJarPublication', project.jar, project.javadocJar, project.sourcesJar)
+        clientJar.dependsOn(project.jar, 'generatePomFileForClientJarPublication', project.javadocJar, project.sourcesJar)
         clientJar.doFirst {
             Path jarFile = project.jar.outputs.files.singleFile.toPath()
             String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}")
@@ -179,7 +175,10 @@ public class PluginBuildPlugin extends BuildPlugin {
     static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/)
 
     /** Find the reponame. */
-    protected static String urlFromOrigin(String origin) {
+    static String urlFromOrigin(String origin) {
+        if (origin == null) {
+            return null // best effort, the url doesnt really matter, it is just required by maven central
+        }
         if (origin.startsWith('https')) {
             return origin
         }
@@ -197,9 +196,9 @@ public class PluginBuildPlugin extends BuildPlugin {
 
         project.publishing {
             publications {
-                jar(MavenPublication) {
+                clientJar(MavenPublication) {
                     from project.components.java
-                    artifactId = artifactId + '-client'
+                    artifactId = project.pluginProperties.extension.name + '-client'
                     pom.withXml { XmlProvider xml ->
                         Node root = xml.asNode()
                         root.appendNode('name', project.pluginProperties.extension.name)
@@ -213,7 +212,7 @@ public class PluginBuildPlugin extends BuildPlugin {
         }
     }
 
-    /** Adds a task to generate a*/
+    /** Adds a task to generate a pom file for the zip distribution. */
     protected void addZipPomGeneration(Project project) {
         project.plugins.apply(MavenPublishPlugin.class)
 
@@ -221,7 +220,18 @@ public class PluginBuildPlugin extends BuildPlugin {
             publications {
                 zip(MavenPublication) {
                     artifact project.bundlePlugin
+                    pom.packaging = 'pom'
+                }
+                /* HUGE HACK: the underlying maven publication library refuses to deploy any attached artifacts
+                 * when the packaging type is set to 'pom'. But Sonatype's OSS repositories require source files
+                 * for artifacts that are of type 'zip'. We already publish the source and javadoc for Elasticsearch
+                 * under the various other subprojects. So here we create another publication using the same
+                 * name that has the "real" pom, and rely on the fact that gradle will execute the publish tasks
+                 * in alphabetical order. This lets us publish the zip file and even though the pom says the
+                 * type is 'pom' instead of 'zip'. We cannot setup a dependency between the tasks because the
+                 * publishing tasks are created *extremely* late in the configuration phase, so that we cannot get
+                 * ahold of the actual task. Furthermore, this entire hack only exists so we can make publishing to
+                 * maven local work, since we publish to maven central externally. */
+                zipReal(MavenPublication) {
                     pom.withXml { XmlProvider xml ->
                         Node root = xml.asNode()
                         root.appendNode('name', project.pluginProperties.extension.name)
@@ -147,6 +147,9 @@ class PrecommitTasks {
                 checkstyleTask.dependsOn(task)
                 task.dependsOn(copyCheckstyleConf)
                 task.inputs.file(checkstyleSuppressions)
+                task.reports {
+                    html.enabled false
+                }
             }
         }
         return checkstyleTask
@@ -20,8 +20,6 @@ package org.elasticsearch.gradle.test
 
 import org.gradle.api.GradleException
 import org.gradle.api.Project
-import org.gradle.api.artifacts.Configuration
-import org.gradle.api.file.FileCollection
 import org.gradle.api.tasks.Input
 
 /** Configuration for an elasticsearch cluster, used for integration tests. */
@@ -47,6 +45,17 @@ class ClusterConfiguration {
     @Input
     int transportPort = 0
 
+    /**
+     * An override of the data directory. This may only be used with a single node.
+     * The value is lazily evaluated at runtime as a String path.
+     */
+    @Input
+    Object dataDir = null
+
+    /** Optional override of the cluster name. */
+    @Input
+    String clusterName = null
+
     @Input
     boolean daemonize = true
 
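As a usage sketch for the two new inputs above (task and path names hypothetical, not from this commit):

```groovy
// Hypothetical integTest cluster configuration.
integTest {
    cluster {
        clusterName = 'my-test-cluster'          // optional override of the generated name
        dataDir = "${buildDir}/custom-data-dir"  // only legal for a single-node cluster
    }
}
```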
@@ -54,16 +63,29 @@ class ClusterConfiguration {
     boolean debug = false
 
     @Input
-    String jvmArgs = System.getProperty('tests.jvm.argline', '')
+    String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
+        " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
+        " " + System.getProperty('tests.jvm.argline', '')
 
     /**
-     * The seed nodes port file. In the case the cluster has more than one node we use a seed node
-     * to form the cluster. The file is null if there is no seed node yet available.
+     * A closure to call which returns the unicast host to connect to for cluster formation.
      *
-     * Note: this can only be null if the cluster has only one node or if the first node is not yet
-     * configured. All nodes but the first node should see a non null value.
+     * This allows multi node clusters, or a new cluster to connect to an existing cluster.
+     * The closure takes two arguments, the NodeInfo for the first node in the cluster, and
+     * an AntBuilder which may be used to wait on conditions before returning.
      */
-    File seedNodePortsFile
+    @Input
+    Closure unicastTransportUri = { NodeInfo seedNode, NodeInfo node, AntBuilder ant ->
+        if (seedNode == node) {
+            return null
+        }
+        ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') {
+            resourceexists {
+                file(file: seedNode.transportPortsFile.toString())
+            }
+        }
+        return seedNode.transportUri()
+    }
 
     /**
     * A closure to call before the cluster is considered ready. The closure is passed the node info,
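Because `unicastTransportUri` is now an overridable `@Input` closure, a build could, for illustration (names hypothetical, not from this commit), point new nodes at an externally started cluster instead of the task's own seed node:

```groovy
// Hypothetical override: join a fixed, already-running node rather than
// waiting on the seed node's transport ports file.
integTest {
    cluster {
        unicastTransportUri = { seedNode, node, ant -> '127.0.0.1:9300' }
    }
}
```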
@@ -73,7 +95,11 @@ class ClusterConfiguration {
     @Input
     Closure waitCondition = { NodeInfo node, AntBuilder ant ->
         File tmpFile = new File(node.cwd, 'wait.success')
-        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}",
+        ant.echo("==> [${new Date()}] checking health: http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}")
+        // checking here for wait_for_nodes to be >= the number of nodes because its possible
+        // this cluster is attempting to connect to nodes created by another task (same cluster name),
+        // so there will be more nodes in that case in the cluster state
+        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}",
             dest: tmpFile.toString(),
             ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
             retries: 10)
@@ -135,12 +161,4 @@ class ClusterConfiguration {
         }
         extraConfigFiles.put(path, sourceFile)
     }
-
-    /** Returns an address and port suitable for a uri to connect to this clusters seed node over transport protocol*/
-    String seedNodeTransportUri() {
-        if (seedNodePortsFile != null) {
-            return seedNodePortsFile.readLines("UTF-8").get(0)
-        }
-        return null;
-    }
 }
@@ -46,9 +46,9 @@ class ClusterFormationTasks {
     /**
      * Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
      *
-     * Returns a NodeInfo object for the first node in the cluster.
+     * Returns a list of NodeInfo objects for each node in the cluster.
      */
-    static NodeInfo setup(Project project, Task task, ClusterConfiguration config) {
+    static List<NodeInfo> setup(Project project, Task task, ClusterConfiguration config) {
         if (task.getEnabled() == false) {
             // no need to add cluster formation tasks if the task won't run!
             return
@@ -72,10 +72,9 @@ class ClusterFormationTasks {
             throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0")
         }
         // this is our current version distribution configuration we use for all kinds of REST tests etc.
-        project.configurations {
-            elasticsearchDistro
-        }
-        configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchDistro, VersionProperties.elasticsearch)
+        String distroConfigName = "${task.name}_elasticsearchDistro"
+        Configuration distro = project.configurations.create(distroConfigName)
+        configureDistributionDependency(project, config.distribution, distro, VersionProperties.elasticsearch)
         if (config.bwcVersion != null && config.numBwcNodes > 0) {
             // if we have a cluster that has a BWC cluster we also need to configure a dependency on the BWC version
             // this version uses the same distribution etc. and only differs in the version we depend on.
@@ -91,28 +90,19 @@ class ClusterFormationTasks {
             // we start N nodes and out of these N nodes there might be M bwc nodes.
             // for each of those nodes we might have a different configuratioon
             String elasticsearchVersion = VersionProperties.elasticsearch
-            Configuration configuration = project.configurations.elasticsearchDistro
             if (i < config.numBwcNodes) {
                 elasticsearchVersion = config.bwcVersion
-                configuration = project.configurations.elasticsearchBwcDistro
+                distro = project.configurations.elasticsearchBwcDistro
             }
             NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir)
-            if (i == 0) {
-                if (config.seedNodePortsFile != null) {
-                    // we might allow this in the future to be set but for now we are the only authority to set this!
-                    throw new GradleException("seedNodePortsFile has a non-null value but first node has not been intialized")
-                }
-                config.seedNodePortsFile = node.transportPortsFile;
-            }
             nodes.add(node)
-            startTasks.add(configureNode(project, task, cleanup, node, configuration))
+            startTasks.add(configureNode(project, task, cleanup, node, distro, nodes.get(0)))
         }
 
         Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
         task.dependsOn(wait)
 
-        // delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests
-        return nodes[0]
+        return nodes
     }
 
     /** Adds a dependency on the given distribution */
@@ -143,7 +133,7 @@ class ClusterFormationTasks {
      *
     * @return a task which starts the node.
     */
-    static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration) {
+    static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration, NodeInfo seedNode) {
 
         // tasks are chained so their execution order is maintained
         Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) {
@@ -156,8 +146,7 @@ class ClusterFormationTasks {
         setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node)
         setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
         setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration)
-        setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node)
-        setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
+        setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node, seedNode)
         setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
 
         // install modules
@@ -172,6 +161,10 @@ class ClusterFormationTasks {
             setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
         }
 
+        // sets up any extra config files that need to be copied over to the ES instance;
+        // its run after plugins have been installed, as the extra config files may belong to plugins
+        setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
+
         // extra setup commands
         for (Map.Entry<String, Object[]> command : node.config.setupCommands.entrySet()) {
             // the first argument is the actual script name, relative to home
@@ -183,9 +176,10 @@ class ClusterFormationTasks {
         Task start = configureStartTask(taskName(task, node, 'start'), project, setup, node)
 
         if (node.config.daemonize) {
-            // if we are running in the background, make sure to stop the server when the task completes
             Task stop = configureStopTask(taskName(task, node, 'stop'), project, [], node)
+            // if we are running in the background, make sure to stop the server when the task completes
             task.finalizedBy(stop)
+            start.finalizedBy(stop)
         }
         return start
     }
@@ -251,7 +245,7 @@ class ClusterFormationTasks {
     }
 
     /** Adds a task to write elasticsearch.yml for the given node configuration */
-    static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node) {
+    static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node, NodeInfo seedNode) {
         Map esConfig = [
                 'cluster.name'                 : node.clusterName,
                 'pidfile'                      : node.pidFile,
@@ -268,15 +262,9 @@ class ClusterFormationTasks {
 
         Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
         writeConfig.doFirst {
-            if (node.nodeNum > 0) { // multi-node cluster case, we have to wait for the seed node to startup
-                ant.waitfor(maxwait: '20', maxwaitunit: 'second', checkevery: '500', checkeveryunit: 'millisecond') {
-                    resourceexists {
-                        file(file: node.config.seedNodePortsFile.toString())
-                    }
-                }
-                // the seed node is enough to form the cluster - all subsequent nodes will get the seed node as a unicast
-                // host and join the cluster via that.
-                esConfig['discovery.zen.ping.unicast.hosts'] = "\"${node.config.seedNodeTransportUri()}\""
+            String unicastTransportUri = node.config.unicastTransportUri(seedNode, node, project.ant)
+            if (unicastTransportUri != null) {
+                esConfig['discovery.zen.ping.unicast.hosts'] = "\"${unicastTransportUri}\""
             }
             File configFile = new File(node.confDir, 'elasticsearch.yml')
             logger.info("Configuring ${configFile}")
@@ -57,6 +57,9 @@ class NodeInfo {
     /** config directory */
     File confDir
 
+    /** data directory (as an Object, to allow lazy evaluation) */
+    Object dataDir
+
     /** THE config file */
     File configFile
 
@@ -95,11 +98,23 @@ class NodeInfo {
         this.config = config
         this.nodeNum = nodeNum
         this.sharedDir = sharedDir
-        clusterName = "${task.path.replace(':', '_').substring(1)}"
+        if (config.clusterName != null) {
+            clusterName = config.clusterName
+        } else {
+            clusterName = "${task.path.replace(':', '_').substring(1)}"
+        }
         baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
         pidFile = new File(baseDir, 'es.pid')
         homeDir = homeDir(baseDir, config.distribution, nodeVersion)
         confDir = confDir(baseDir, config.distribution, nodeVersion)
+        if (config.dataDir != null) {
+            if (config.numNodes != 1) {
+                throw new IllegalArgumentException("Cannot set data dir for integ test with more than one node")
+            }
+            dataDir = config.dataDir
+        } else {
+            dataDir = new File(homeDir, "data")
+        }
         configFile = new File(confDir, 'elasticsearch.yml')
         // even for rpm/deb, the logs are under home because we dont start with real services
         File logsDir = new File(homeDir, 'logs')
@@ -140,7 +155,7 @@ class NodeInfo {
             }
         }
         env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
-        args.addAll("-E", "path.conf=${confDir}")
+        args.addAll("-E", "path.conf=${confDir}", "-E", "path.data=${-> dataDir.toString()}")
         if (Os.isFamily(Os.FAMILY_WINDOWS)) {
             args.add('"') // end the entire command, quoted
         }
@@ -184,6 +199,19 @@ class NodeInfo {
         return transportPortsFile.readLines("UTF-8").get(0)
     }
 
+    /** Returns the file which contains the transport protocol ports for this node */
+    File getTransportPortsFile() {
+        return transportPortsFile
+    }
+
+    /** Returns the data directory for this node */
+    File getDataDir() {
+        if (!(dataDir instanceof File)) {
+            return new File(dataDir)
+        }
+        return dataDir
+    }
+
     /** Returns the directory elasticsearch home is contained in for the given distribution */
     static File homeDir(File baseDir, String distro, String nodeVersion) {
         String path
@@ -34,6 +34,9 @@ public class RestIntegTestTask extends RandomizedTestingTask {
 
     ClusterConfiguration clusterConfig
 
+    /** Info about nodes in the integ test cluster. Note this is *not* available until runtime. */
+    List<NodeInfo> nodes
+
     /** Flag indicating whether the rest tests in the rest spec should be run. */
     @Input
     boolean includePackaged = false
@@ -52,6 +55,12 @@ public class RestIntegTestTask extends RandomizedTestingTask {
         parallelism = '1'
         include('**/*IT.class')
         systemProperty('tests.rest.load_packaged', 'false')
+        systemProperty('tests.rest.cluster', "${-> nodes[0].httpUri()}")
+        systemProperty('tests.config.dir', "${-> nodes[0].confDir}")
+        // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
+        // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
+        // both as separate sysprops
+        systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")
 
         // copy the rest spec/tests into the test resources
         RestSpecHack.configureDependencies(project)
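The `"${-> nodes[0].httpUri()}"` values above use Groovy's lazily evaluated GString: the `{ -> ... }` closure runs at `toString()` time, not at configuration time, so the system property reads the node's URI only once the cluster is up (and `nodes` has been assigned in `projectsEvaluated`, below). A standalone sketch of the idiom, illustrative only:

```groovy
// The closure inside ${-> ...} is evaluated when the GString is
// converted to a String, not when the GString is created.
def port = null
def uri = "http://127.0.0.1:${-> port}"
port = 9200
assert uri.toString() == 'http://127.0.0.1:9200'
```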
@@ -61,13 +70,7 @@ public class RestIntegTestTask extends RandomizedTestingTask {
         // this must run after all projects have been configured, so we know any project
         // references can be accessed as a fully configured
         project.gradle.projectsEvaluated {
-            NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig)
-            systemProperty('tests.rest.cluster', "${-> node.httpUri()}")
-            systemProperty('tests.config.dir', "${-> node.confDir}")
-            // TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
-            // that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
-            // both as separate sysprops
-            systemProperty('tests.cluster', "${-> node.transportUri()}")
+            nodes = ClusterFormationTasks.setup(project, this, clusterConfig)
         }
     }
 
@@ -88,6 +91,10 @@ public class RestIntegTestTask extends RandomizedTestingTask {
         return clusterConfig
     }
 
+    public List<NodeInfo> getNodes() {
+        return nodes
+    }
+
     @Override
     public Task dependsOn(Object... dependencies) {
         super.dependsOn(dependencies)
@@ -43,18 +43,22 @@ public class RestSpecHack {
     }
 
     /**
-     * Creates a task to copy the rest spec files.
+     * Creates a task (if necessary) to copy the rest spec files.
      *
      * @param project The project to add the copy task to
     * @param includePackagedTests true if the packaged tests should be copied, false otherwise
     */
     public static Task configureTask(Project project, boolean includePackagedTests) {
+        Task copyRestSpec = project.tasks.findByName('copyRestSpec')
+        if (copyRestSpec != null) {
+            return copyRestSpec
+        }
         Map copyRestSpecProps = [
             name     : 'copyRestSpec',
             type     : Copy,
             dependsOn: [project.configurations.restSpec, 'processTestResources']
         ]
-        Task copyRestSpec = project.tasks.create(copyRestSpecProps) {
+        copyRestSpec = project.tasks.create(copyRestSpecProps) {
             from { project.zipTree(project.configurations.restSpec.singleFile) }
             include 'rest-api-spec/api/**'
             if (includePackagedTests) {
@ -10,6 +10,9 @@
|
||||
<suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessLexer\.java" checks="." />
|
||||
<suppress files="org[/\\]elasticsearch[/\\]painless[/\\]antlr[/\\]PainlessParser(|BaseVisitor|Visitor)\.java" checks="." />
|
||||
|
||||
<!-- ThrowableProxy is a forked copy from Log4j to hack around a bug; this can be removed when the hack is removed -->
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]logging[/\\]log4j[/\\]core[/\\]impl[/\\]ThrowableProxy.java" checks="RegexpSinglelineJava" />
|
||||
|
||||
<!-- Hopefully temporary suppression of LineLength on files that don't pass it. We should remove these when we the
|
||||
files start to pass. -->
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]queries[/\\]BlendedTermQuery.java" checks="LineLength" />
|
||||
@ -21,7 +24,6 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]ClusterHealthRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]health[/\\]TransportClusterHealthAction.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]hotthreads[/\\]NodesHotThreadsRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]info[/\\]NodeInfo.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]node[/\\]stats[/\\]NodesStatsRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]delete[/\\]DeleteRepositoryRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]repositories[/\\]delete[/\\]TransportDeleteRepositoryAction.java" checks="LineLength" />
|
||||
@ -236,7 +238,6 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]IncompatibleClusterStateVersionException.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]InternalClusterInfoService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]LocalNodeMasterListener.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]index[/\\]NodeIndexDeletedAction.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]index[/\\]NodeMappingRefreshAction.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]shard[/\\]ShardStateAction.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]block[/\\]ClusterBlock.java" checks="LineLength" />
|
||||
@ -248,8 +249,6 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MappingMetaData.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaData.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataCreateIndexService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataDeleteIndexService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexAliasesService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexStateService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexTemplateService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataIndexUpgradeService.java" checks="LineLength" />
|
||||
@ -303,7 +302,6 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]NetworkModule.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]network[/\\]NetworkService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]recycler[/\\]Recyclers.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]unit[/\\]ByteSizeValue.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]BigArrays.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CancellableThreads.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CollectionUtils.java" checks="LineLength" />
|
||||
@ -344,7 +342,6 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicyConfig.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLog.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisRegistry.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CommonGramsTokenFilterFactory.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CustomAnalyzerProvider.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]NumericDoubleAnalyzer.java" checks="LineLength" />
|
||||
@ -388,7 +385,6 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MapperService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]Mapping.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]MetadataFieldMapper.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]ParsedDocument.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]CompletionFieldMapper.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]LegacyDateFieldMapper.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]LegacyDoubleFieldMapper.java" checks="LineLength" />
|
||||
@ -417,7 +413,6 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]RootObjectMapper.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]merge[/\\]MergeStats.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]AbstractQueryBuilder.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MatchQueryParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryBuilders.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryValidationException.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]support[/\\]InnerHitsQueryParserHelper.java" checks="LineLength" />
|
||||
@ -460,7 +455,7 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveriesCollection.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryFailedException.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoverySettings.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoverySource.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]PeerRecoverySourceService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]RecoveryState.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]recovery[/\\]StartRecoveryRequest.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]store[/\\]IndicesStore.java" checks="LineLength" />
|
||||
@ -471,9 +466,7 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmStats.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DummyPluginInfo.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]RemovePluginCommand.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoriesService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]Repository.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]RepositoryModule.java" checks="LineLength" />
|
||||
@ -492,15 +485,12 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestPendingClusterTasksAction.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestShardsAction.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestThreadPoolAction.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]AbstractScriptParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptContextRegistry.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptModes.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptParameterParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptSettings.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]Template.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalMultiBucketAggregation.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]ValuesSourceAggregationBuilder.java" checks="LineLength" />
|
||||
@ -559,29 +549,19 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]ValuesSourceParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]format[/\\]ValueFormat.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]format[/\\]ValueParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]builder[/\\]SearchSourceBuilder.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]controller[/\\]SearchPhaseController.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]AggregatedDfs.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]DfsSearchResult.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchPhase.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSearchResult.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhase.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhaseParseElement.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]DefaultSearchContext.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]FilteredSearchContext.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]InternalSearchHit.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]SearchContext.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]ShardSearchTransportRequest.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]FieldLookup.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]LeafDocLookup.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]LeafFieldsLookup.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]query[/\\]QueryPhase.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]rescore[/\\]QueryRescorer.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]rescore[/\\]RescoreParseElement.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]sort[/\\]GeoDistanceSortParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]sort[/\\]ScriptSortParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]sort[/\\]SortParseElement.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]SuggestContextParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]SuggestUtils.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]CompletionSuggestParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]CategoryContextMapping.java" checks="LineLength" />
|
||||
@ -590,9 +570,7 @@
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]GeoQueryContext.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]CandidateScorer.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]NoisyChannelSpellChecker.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]PhraseSuggestParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]WordScorer.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]term[/\\]TermSuggestParser.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]RestoreService.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardFailure.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardsService.java" checks="LineLength" />
|
||||
@ -629,7 +607,7 @@
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]SimulatePipelineResponseTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]ingest[/\\]WriteableIngestDocumentTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]SearchRequestBuilderTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]AutoCreateIndexTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]IndicesOptionsTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]TransportActionFilterChainTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]WaitActiveShardCountIT.java" checks="LineLength" />
|
||||
@ -688,7 +666,6 @@
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]AllocationPriorityTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]AwarenessAllocationTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]BalanceConfigurationTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]CatAllocationTestCase.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]ClusterRebalanceRoutingTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]ConcurrentRebalanceRoutingTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]allocation[/\\]DeadNodesAllocationTests.java" checks="LineLength" />
|
||||
@ -723,7 +700,6 @@
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]settings[/\\]ClusterSettingsIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]structure[/\\]RoutingIteratorTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]codecs[/\\]CodecTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]BooleansTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreContainerTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreTests.java" checks="LineLength" />
|
||||
@ -869,7 +845,6 @@
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedFileIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedTranslogIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]DirectoryUtilsTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]ExceptionRetryIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStoreTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]StoreTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]suggest[/\\]stats[/\\]SuggestStatsIT.java" checks="LineLength" />
|
||||
@ -909,8 +884,6 @@
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]template[/\\]SimpleIndexTemplateIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mget[/\\]SimpleMgetIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorServiceSettingsTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]os[/\\]OsProbeTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]nodesinfo[/\\]NodeInfoStreamingTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginInfoTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsServiceTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]FullRollingRestartIT.java" checks="LineLength" />
|
||||
@ -961,7 +934,6 @@
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]TransportTwoNodesSearchIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]child[/\\]ChildQuerySearchIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]child[/\\]ParentFieldLoadingIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhasePluginIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoBoundingBoxIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoFilterIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoShapeQueryTests.java" checks="LineLength" />
|
||||
@ -998,9 +970,7 @@
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESBlobStoreRepositoryIntegTestCase.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]geo[/\\]RandomShapeGenerator.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchGeoAssertions.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]ThreadPoolSerializationTests.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]timestamp[/\\]SimpleTimestampIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ttl[/\\]SimpleTTLIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]update[/\\]UpdateIT.java" checks="LineLength" />
|
||||
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]validate[/\\]SimpleValidateQueryIT.java" checks="LineLength" />
|
||||
@ -1017,7 +987,6 @@
|
||||
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolateRequestBuilder.java" checks="LineLength" />
|
||||
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolateShardResponse.java" checks="LineLength" />
|
||||
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportMultiPercolateAction.java" checks="LineLength" />
|
||||
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportPercolateAction.java" checks="LineLength" />
|
||||
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportShardMultiPercolateAction.java" checks="LineLength" />
|
||||
<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorIT.java" checks="LineLength" />
|
||||
<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorIT.java" checks="LineLength" />
|
||||
@ -1045,14 +1014,6 @@
|
||||
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptEngineTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonScriptMultiThreadedTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]lang-python[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]python[/\\]PythonSecurityTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]AttachmentMapper.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]DateAttachmentMapperTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]EncryptedDocMapperTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]MetadataMapperTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]MultifieldAttachmentMapperTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]SimpleAttachmentMapperTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]StandaloneRunner.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-attachments[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]mapper[/\\]attachments[/\\]VariousDocTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapper.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperTests.java" checks="LineLength" />
|
||||
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperUpgradeTests.java" checks="LineLength" />
|
||||
@ -1085,7 +1046,6 @@
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]BackgroundIndexer.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CompositeTestCluster.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CorruptionUtils.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESAllocationTestCase.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESBackcompatTestCase.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESIntegTestCase.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESSingleNodeTestCase.java" checks="LineLength" />
|
||||
@ -1095,7 +1055,6 @@
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]IndexSettingsModule.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]InternalTestCluster.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]MockIndexEventListener.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]TestSearchContext.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]cluster[/\\]NoopClusterService.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]cluster[/\\]TestClusterService.java" checks="LineLength" />
|
||||
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]discovery[/\\]ClusterDiscoveryConfiguration.java" checks="LineLength" />
|
||||
|
@ -1,12 +1,12 @@
elasticsearch = 5.0.0-alpha6
lucene = 6.1.0
elasticsearch = 6.0.0-alpha1
lucene = 6.2.0

# optional dependencies
spatial4j = 0.6
jts = 1.13
jackson = 2.8.1
snakeyaml = 1.15
log4j = 1.2.17
log4j = 2.6.2
slf4j = 1.6.2
jna = 4.2.2

@ -20,4 +20,4 @@ commonscodec = 1.10
hamcrest = 1.3
securemock = 1.2
# benchmark dependencies
jmh = 1.12
jmh = 1.14

@ -1,34 +1,53 @@
Steps to execute the benchmark:
### Steps to execute the benchmark

1. Start Elasticsearch on the target host (ideally *not* on the same machine)
2. Create an empty index with the mapping you want to benchmark
3. Build an uberjar with `gradle :client:benchmark:shadowJar` and execute it.
4. Delete the index
5. Repeat steps 2. - 4. for multiple iterations. The first iterations are intended as warmup for Elasticsearch itself. Always start the same benchmark in step 3!
4. After the benchmark: Shutdown Elasticsearch and delete the data directory
1. Build `client-benchmark-noop-api-plugin` with `gradle :client:client-benchmark-noop-api-plugin:assemble`
2. Install it on the target host with `bin/elasticsearch-plugin install file:///full/path/to/client-benchmark-noop-api-plugin.zip`
3. Start Elasticsearch on the target host (ideally *not* on the same machine)
4. Build an uberjar with `gradle :client:benchmark:shadowJar` and execute it.

Repeat all steps above for the other benchmark candidate.

Example benchmark:
### Example benchmark

* Download benchmark data from http://benchmarks.elastic.co/corpora/geonames/documents.json.bz2 and decompress
* Use the mapping file https://github.com/elastic/rally-tracks/blob/master/geonames/mappings.json to create the index
In general, you should define a few GC-related settings `-Xms8192M -Xmx8192M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails` and keep an eye on GC activity. You can also define `-XX:+PrintCompilation` to see JIT activity.

Example command line parameter list:
#### Bulk indexing

Download benchmark data from http://benchmarks.elastic.co/corpora/geonames/documents.json.bz2 and decompress them.

Example command line parameters:

```
rest 192.168.2.2 /home/your_user_name/.rally/benchmarks/data/geonames/documents.json geonames type 8647880 5000 "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }\""
rest bulk 192.168.2.2 ./documents.json geonames type 8647880 5000
```

The parameters are in order:

* Client type: Use either "rest" or "transport"
* Benchmark type: Use either "bulk" or "search"
* Benchmark target host IP (the host where Elasticsearch is running)
* full path to the file that should be bulk indexed
* name of the index
* name of the (sole) type in the index
* number of documents in the file
* bulk size
* a search request body (remember to escape double quotes). The `TransportClientBenchmark` uses `QueryBuilders.wrapperQuery()` internally which automatically adds a root key `query`, so it must not be present in the command line parameter.

You should also define a few GC-related settings `-Xms4096M -Xmx4096M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails` and keep an eye on GC activity. You can also define `-XX:+PrintCompilation` to see JIT activity.

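To connect the example numbers above to what the runner does with them: `AbstractBenchmark` (further down in this commit) treats 40% of all bulk requests as warmup. A sketch of that arithmetic, using the formulas from that code and the values from the example command line:

```java
// Sketch: iteration counts derived from the bulk example above.
int totalDocs = 8_647_880;                                        // documents in the file
int bulkSize = 5_000;                                             // documents per bulk request
int totalIterationCount = (int) Math.floor(totalDocs / bulkSize); // 1729 bulk requests
int warmupIterations = (int) (0.4d * totalIterationCount);        // 691 warmup iterations
int iterations = totalIterationCount - warmupIterations;          // 1038 measured iterations
```
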
#### Search

Example command line parameters:

```
rest search 192.168.2.2 geonames "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }\"" 500,1000,1100,1200
```

The parameters are in order:

* Client type: Use either "rest" or "transport"
* Benchmark type: Use either "bulk" or "search"
* Benchmark target host IP (the host where Elasticsearch is running)
* name of the index
* a search request body (remember to escape double quotes). The `TransportClientBenchmark` uses `QueryBuilders.wrapperQuery()` internally which automatically adds a root key `query`, so it must not be present in the command line parameter.
* A comma-separated list of target throughput rates

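As a rough illustration of what a target throughput rate means: the search task (see `SearchBenchmarkTask` later in this commit) turns the rate into a per-operation time budget and sleeps off whatever the request itself did not use. A sketch with made-up timings:

```java
import java.util.concurrent.TimeUnit;

// Sketch: a target rate becomes a per-request budget (all values assumed).
long microsPerSec = TimeUnit.SECONDS.toMicros(1L);  // 1_000_000
int targetThroughput = 1_000;                       // ops/s, from the rate list above
long budget = microsPerSec / targetThroughput;      // 1_000 microseconds per operation
long serviceTime = 300;                             // e.g. the search itself took 0.3 ms
long waitTime = budget - serviceTime;               // sleep 700 microseconds, then fire again
```
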
@ -50,6 +50,8 @@ dependencies {
    compile 'org.apache.commons:commons-math3:3.2'

    compile("org.elasticsearch.client:rest:${version}")
    // bottleneck should be the client, not Elasticsearch
    compile project(path: ':client:client-benchmark-noop-api-plugin')
    // for transport client
    compile("org.elasticsearch:elasticsearch:${version}")
    compile("org.elasticsearch.client:transport:${version}")
@ -62,7 +64,3 @@ dependencies {

// No licenses for our benchmark deps (we don't ship benchmarks)
dependencyLicenses.enabled = false

extraArchive {
    javadoc = false
}

@ -27,7 +27,11 @@ import org.elasticsearch.common.SuppressForbidden;
import java.io.Closeable;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

public abstract class AbstractBenchmark<T extends Closeable> {
    private static final int SEARCH_BENCHMARK_ITERATIONS = 10_000;
@ -40,52 +44,111 @@ public abstract class AbstractBenchmark<T extends Closeable> {

    @SuppressForbidden(reason = "system out is ok for a command line tool")
    public final void run(String[] args) throws Exception {
        if (args.length < 6) {
            System.err.println(
                "usage: benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize [search request body]");
        if (args.length < 1) {
            System.err.println("usage: [search|bulk]");
            System.exit(1);
        }
        String benchmarkTargetHost = args[0];
        String indexFilePath = args[1];
        String indexName = args[2];
        String typeName = args[3];
        int totalDocs = Integer.valueOf(args[4]);
        int bulkSize = Integer.valueOf(args[5]);
        switch (args[0]) {
            case "search":
                runSearchBenchmark(args);
                break;
            case "bulk":
                runBulkIndexBenchmark(args);
                break;
            default:
                System.err.println("Unknown benchmark type [" + args[0] + "]");
                System.exit(1);

        }

    }

    @SuppressForbidden(reason = "system out is ok for a command line tool")
    private void runBulkIndexBenchmark(String[] args) throws Exception {
        if (args.length != 7) {
            System.err.println(
                "usage: 'bulk' benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize");
            System.exit(1);
        }
        String benchmarkTargetHost = args[1];
        String indexFilePath = args[2];
        String indexName = args[3];
        String typeName = args[4];
        int totalDocs = Integer.valueOf(args[5]);
        int bulkSize = Integer.valueOf(args[6]);

        int totalIterationCount = (int) Math.floor(totalDocs / bulkSize);
        // consider 40% of all iterations as warmup iterations
        int warmupIterations = (int) (0.4d * totalIterationCount);
        int iterations = totalIterationCount - warmupIterations;
        String searchBody = (args.length == 7) ? args[6] : null;

        T client = client(benchmarkTargetHost);

        BenchmarkRunner benchmark = new BenchmarkRunner(warmupIterations, iterations,
            new BulkBenchmarkTask(
                bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations + iterations, bulkSize));
                bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations, iterations, bulkSize));

        try {
            benchmark.run();
            if (searchBody != null) {
                for (int run = 1; run <= 5; run++) {
                    System.out.println("=============");
                    System.out.println(" Trial run " + run);
                    System.out.println("=============");

                    for (int throughput = 100; throughput <= 100_000; throughput *= 10) {
                        //GC between trials to reduce the likelihood of a GC occurring in the middle of a trial.
                        runGc();
                        BenchmarkRunner searchBenchmark = new BenchmarkRunner(SEARCH_BENCHMARK_ITERATIONS, SEARCH_BENCHMARK_ITERATIONS,
                            new SearchBenchmarkTask(
                                searchRequestExecutor(client, indexName), searchBody, 2 * SEARCH_BENCHMARK_ITERATIONS, throughput));
                        System.out.printf("Target throughput = %d ops / s%n", throughput);
                        searchBenchmark.run();
                    }
                }
            }
            runTrials(() -> {
                runGc();
                benchmark.run();
            });
        } finally {
            client.close();
        }

    }

    @SuppressForbidden(reason = "system out is ok for a command line tool")
    private void runSearchBenchmark(String[] args) throws Exception {
        if (args.length != 5) {
            System.err.println(
                "usage: 'search' benchmarkTargetHostIp indexName searchRequestBody throughputRates");
            System.exit(1);
        }
        String benchmarkTargetHost = args[1];
        String indexName = args[2];
        String searchBody = args[3];
        List<Integer> throughputRates = Arrays.asList(args[4].split(",")).stream().map(Integer::valueOf).collect(Collectors.toList());

        T client = client(benchmarkTargetHost);

        try {
            runTrials(() -> {
                for (int throughput : throughputRates) {
                    //GC between trials to reduce the likelihood of a GC occurring in the middle of a trial.
                    runGc();
                    BenchmarkRunner benchmark = new BenchmarkRunner(SEARCH_BENCHMARK_ITERATIONS, SEARCH_BENCHMARK_ITERATIONS,
                        new SearchBenchmarkTask(
                            searchRequestExecutor(client, indexName), searchBody, SEARCH_BENCHMARK_ITERATIONS,
                            SEARCH_BENCHMARK_ITERATIONS, throughput));
                    System.out.printf("Target throughput = %d ops / s%n", throughput);
                    benchmark.run();
                }
            });
        } finally {
            client.close();
        }
    }

    @SuppressForbidden(reason = "system out is ok for a command line tool")
    private void runTrials(Runnable runner) {
        int totalWarmupTrialRuns = 1;
        for (int run = 1; run <= totalWarmupTrialRuns; run++) {
            System.out.println("======================");
            System.out.println(" Warmup trial run " + run + "/" + totalWarmupTrialRuns);
            System.out.println("======================");
            runner.run();
        }

        int totalTrialRuns = 5;
        for (int run = 1; run <= totalTrialRuns; run++) {
            System.out.println("================");
            System.out.println(" Trial run " + run + "/" + totalTrialRuns);
            System.out.println("================");

            runner.run();
        }
    }

    /**

@ -37,7 +37,7 @@ public class BenchmarkMain {
                benchmark = new RestClientBenchmark();
                break;
            default:
                System.err.println("Unknown benchmark type [" + type + "]");
                System.err.println("Unknown client type [" + type + "]");
                System.exit(1);
        }
        benchmark.run(Arrays.copyOfRange(args, 1, args.length));

@ -40,8 +40,8 @@ public final class BenchmarkRunner {
    }

    @SuppressForbidden(reason = "system out is ok for a command line tool")
    public void run() throws Exception {
        SampleRecorder recorder = new SampleRecorder(warmupIterations, iterations);
    public void run() {
        SampleRecorder recorder = new SampleRecorder(iterations);
        System.out.printf("Running %s with %d warmup iterations and %d iterations.%n",
            task.getClass().getSimpleName(), warmupIterations, iterations);

@ -52,6 +52,8 @@ public final class BenchmarkRunner {
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
            return;
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        }

        List<Sample> samples = recorder.getSamples();
@ -62,17 +64,24 @@ public final class BenchmarkRunner {
        }

        for (Metrics metrics : summaryMetrics) {
            System.out.printf(Locale.ROOT, "Operation: %s%n", metrics.operation);
            String stats = String.format(Locale.ROOT,
                "Throughput = %f ops/s, p90 = %f ms, p95 = %f ms, p99 = %f ms, p99.9 = %f ms, p99.99 = %f ms",
                metrics.throughput,
                metrics.serviceTimeP90, metrics.serviceTimeP95,
                metrics.serviceTimeP99, metrics.serviceTimeP999,
                metrics.serviceTimeP9999);
            System.out.println(repeat(stats.length(), '-'));
            System.out.println(stats);
            String throughput = String.format(Locale.ROOT, "Throughput [ops/s]: %f", metrics.throughput);
            String serviceTimes = String.format(Locale.ROOT,
                "Service time [ms]: p50 = %f, p90 = %f, p95 = %f, p99 = %f, p99.9 = %f, p99.99 = %f",
                metrics.serviceTimeP50, metrics.serviceTimeP90, metrics.serviceTimeP95,
                metrics.serviceTimeP99, metrics.serviceTimeP999, metrics.serviceTimeP9999);
            String latencies = String.format(Locale.ROOT,
                "Latency [ms]: p50 = %f, p90 = %f, p95 = %f, p99 = %f, p99.9 = %f, p99.99 = %f",
                metrics.latencyP50, metrics.latencyP90, metrics.latencyP95,
                metrics.latencyP99, metrics.latencyP999, metrics.latencyP9999);

            int lineLength = Math.max(serviceTimes.length(), latencies.length());

            System.out.println(repeat(lineLength, '-'));
            System.out.println(throughput);
            System.out.println(serviceTimes);
            System.out.println(latencies);
            System.out.printf("success count = %d, error count = %d%n", metrics.successCount, metrics.errorCount);
            System.out.println(repeat(stats.length(), '-'));
            System.out.println(repeat(lineLength, '-'));
        }
    }

@ -23,23 +23,38 @@ public final class Metrics {
    public final long successCount;
    public final long errorCount;
    public final double throughput;
    public final double serviceTimeP50;
    public final double serviceTimeP90;
    public final double serviceTimeP95;
    public final double serviceTimeP99;
    public final double serviceTimeP999;
    public final double serviceTimeP9999;
    public final double latencyP50;
    public final double latencyP90;
    public final double latencyP95;
    public final double latencyP99;
    public final double latencyP999;
    public final double latencyP9999;

    public Metrics(String operation, long successCount, long errorCount, double throughput,
                   double serviceTimeP90, double serviceTimeP95, double serviceTimeP99,
                   double serviceTimeP999, double serviceTimeP9999) {
                   double serviceTimeP50, double serviceTimeP90, double serviceTimeP95, double serviceTimeP99,
                   double serviceTimeP999, double serviceTimeP9999, double latencyP50, double latencyP90,
                   double latencyP95, double latencyP99, double latencyP999, double latencyP9999) {
        this.operation = operation;
        this.successCount = successCount;
        this.errorCount = errorCount;
        this.throughput = throughput;
        this.serviceTimeP50 = serviceTimeP50;
        this.serviceTimeP90 = serviceTimeP90;
        this.serviceTimeP95 = serviceTimeP95;
        this.serviceTimeP99 = serviceTimeP99;
        this.serviceTimeP999 = serviceTimeP999;
        this.serviceTimeP9999 = serviceTimeP9999;
        this.latencyP50 = latencyP50;
        this.latencyP90 = latencyP90;
        this.latencyP95 = latencyP95;
        this.latencyP99 = latencyP99;
        this.latencyP999 = latencyP999;
        this.latencyP9999 = latencyP9999;
    }
}

@ -50,13 +50,16 @@ public final class MetricsCalculator {
        for (Map.Entry<String, List<Sample>> operationAndMetrics : samplesPerOperation.entrySet()) {
            List<Sample> samples = operationAndMetrics.getValue();
            double[] serviceTimes = new double[samples.size()];
            double[] latencies = new double[samples.size()];
            int it = 0;
            long firstStart = Long.MAX_VALUE;
            long latestEnd = Long.MIN_VALUE;
            for (Sample sample : samples) {
                firstStart = Math.min(sample.getStartTimestamp(), firstStart);
                latestEnd = Math.max(sample.getStopTimestamp(), latestEnd);
                serviceTimes[it++] = sample.getServiceTime();
                serviceTimes[it] = sample.getServiceTime();
                latencies[it] = sample.getLatency();
                it++;
            }

            metrics.add(new Metrics(operationAndMetrics.getKey(),
@ -65,11 +68,18 @@ public final class MetricsCalculator {
                // throughput calculation is based on the total (Wall clock) time it took to generate all samples
                calculateThroughput(samples.size(), latestEnd - firstStart),
                // convert ns -> ms without losing precision
                StatUtils.percentile(serviceTimes, 50.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(serviceTimes, 90.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(serviceTimes, 95.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(serviceTimes, 99.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(serviceTimes, 99.9d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(serviceTimes, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L)));
                StatUtils.percentile(serviceTimes, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(latencies, 50.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(latencies, 90.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(latencies, 95.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(latencies, 99.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(latencies, 99.9d) / TimeUnit.MILLISECONDS.toNanos(1L),
                StatUtils.percentile(latencies, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L)));
        }
        return metrics;
    }

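A quick illustration of the `ns -> ms` conversion above, using the same commons-math call on made-up sample values:

```java
import org.apache.commons.math3.stat.StatUtils;
import java.util.concurrent.TimeUnit;

// Made-up service times, recorded in nanoseconds.
double[] serviceTimes = {900_000d, 1_200_000d, 1_500_000d, 2_000_000d};
// percentile() returns nanoseconds; dividing by 1_000_000 ns/ms keeps the
// result a double, so sub-millisecond precision survives the conversion.
double p90Millis = StatUtils.percentile(serviceTimes, 90.0d) / TimeUnit.MILLISECONDS.toNanos(1L);
System.out.println(p90Millis); // prints the p90 service time in milliseconds
```
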
@ -20,12 +20,14 @@ package org.elasticsearch.client.benchmark.metrics;

public final class Sample {
    private final String operation;
    private final long expectedStartTimestamp;
    private final long startTimestamp;
    private final long stopTimestamp;
    private final boolean success;

    public Sample(String operation, long startTimestamp, long stopTimestamp, boolean success) {
    public Sample(String operation, long expectedStartTimestamp, long startTimestamp, long stopTimestamp, boolean success) {
        this.operation = operation;
        this.expectedStartTimestamp = expectedStartTimestamp;
        this.startTimestamp = startTimestamp;
        this.stopTimestamp = stopTimestamp;
        this.success = success;
@ -48,7 +50,10 @@ public final class Sample {
    }

    public long getServiceTime() {
        // this is *not* latency, we're not including wait time in the queue (on purpose)
        return stopTimestamp - startTimestamp;
    }

    public long getLatency() {
        return stopTimestamp - expectedStartTimestamp;
    }
}

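The point of the new `expectedStartTimestamp` is the classic service-time-versus-latency split: latency counts from when the request was *scheduled* to start, so client-side queueing shows up in it, while service time deliberately excludes it (as the comment in `getServiceTime()` notes). A small worked example with assumed timestamps:

```java
// Assumed timestamps (nanoseconds) for one search operation.
long expectedStart = 0L;       // when the scheduler wanted to issue the request
long actualStart = 400_000L;   // it waited 0.4 ms on the client before going out
long stop = 1_400_000L;        // response received

Sample sample = new Sample("search", expectedStart, actualStart, stop, true);
sample.getServiceTime();       // 1_000_000 ns = 1.0 ms (request/response only)
sample.getLatency();           // 1_400_000 ns = 1.4 ms (includes the 0.4 ms wait)
```
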
@ -28,21 +28,14 @@ import java.util.List;
 * This class is NOT threadsafe.
 */
public final class SampleRecorder {
    private final int warmupIterations;
    private final List<Sample> samples;
    private int currentIteration;

    public SampleRecorder(int warmupIterations, int iterations) {
        this.warmupIterations = warmupIterations;
    public SampleRecorder(int iterations) {
        this.samples = new ArrayList<>(iterations);
    }

    public void addSample(Sample sample) {
        currentIteration++;
        // only add samples after warmup
        if (currentIteration > warmupIterations) {
            samples.add(sample);
        }
        samples.add(sample);
    }

    public List<Sample> getSamples() {

@ -18,13 +18,13 @@
 */
package org.elasticsearch.client.benchmark.ops.bulk;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.benchmark.BenchmarkTask;
import org.elasticsearch.client.benchmark.metrics.Sample;
import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;

import java.io.BufferedReader;
@ -43,15 +43,18 @@ import java.util.concurrent.TimeUnit;
public class BulkBenchmarkTask implements BenchmarkTask {
    private final BulkRequestExecutor requestExecutor;
    private final String indexFilePath;
    private final int totalIterations;
    private final int warmupIterations;
    private final int measurementIterations;
    private final int bulkSize;
    private LoadGenerator generator;
    private ExecutorService executorService;

    public BulkBenchmarkTask(BulkRequestExecutor requestExecutor, String indexFilePath, int totalIterations, int bulkSize) {
    public BulkBenchmarkTask(BulkRequestExecutor requestExecutor, String indexFilePath, int warmupIterations, int measurementIterations,
                             int bulkSize) {
        this.requestExecutor = requestExecutor;
        this.indexFilePath = indexFilePath;
        this.totalIterations = totalIterations;
        this.warmupIterations = warmupIterations;
        this.measurementIterations = measurementIterations;
        this.bulkSize = bulkSize;
    }

@ -60,7 +63,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
    public void setUp(SampleRecorder sampleRecorder) {
        BlockingQueue<List<String>> bulkQueue = new ArrayBlockingQueue<>(256);

        BulkIndexer runner = new BulkIndexer(bulkQueue, totalIterations, sampleRecorder, requestExecutor);
        BulkIndexer runner = new BulkIndexer(bulkQueue, warmupIterations, measurementIterations, sampleRecorder, requestExecutor);

        executorService = Executors.newSingleThreadExecutor((r) -> new Thread(r, "bulk-index-runner"));
        executorService.submit(runner);
@ -132,24 +135,26 @@ public class BulkBenchmarkTask implements BenchmarkTask {


    private static final class BulkIndexer implements Runnable {
        private static final ESLogger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
        private static final Logger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());

        private final BlockingQueue<List<String>> bulkData;
        private final int totalIterations;
        private final int warmupIterations;
        private final int measurementIterations;
        private final BulkRequestExecutor bulkRequestExecutor;
        private final SampleRecorder sampleRecorder;

        public BulkIndexer(BlockingQueue<List<String>> bulkData, int totalIterations, SampleRecorder sampleRecorder,
                           BulkRequestExecutor bulkRequestExecutor) {
        public BulkIndexer(BlockingQueue<List<String>> bulkData, int warmupIterations, int measurementIterations,
                           SampleRecorder sampleRecorder, BulkRequestExecutor bulkRequestExecutor) {
            this.bulkData = bulkData;
            this.totalIterations = totalIterations;
            this.warmupIterations = warmupIterations;
            this.measurementIterations = measurementIterations;
            this.bulkRequestExecutor = bulkRequestExecutor;
            this.sampleRecorder = sampleRecorder;
        }

        @Override
        public void run() {
            for (int iteration = 0; iteration < totalIterations; iteration++) {
            for (int iteration = 0; iteration < warmupIterations + measurementIterations; iteration++) {
                boolean success = false;
                List<String> currentBulk;
                try {
@ -158,8 +163,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
                    Thread.currentThread().interrupt();
                    return;
                }
                // Yes, this approach is prone to coordinated omission *but* we have to consider that we want to benchmark a closed system
                // with backpressure here instead of an open system. So this is actually correct in this case.
                //measure only service time, latency is not that interesting for a throughput benchmark
                long start = System.nanoTime();
                try {
                    success = bulkRequestExecutor.bulkIndex(currentBulk);
@ -167,7 +171,9 @@ public class BulkBenchmarkTask implements BenchmarkTask {
                    logger.warn("Error while executing bulk request", ex);
                }
                long stop = System.nanoTime();
                sampleRecorder.addSample(new Sample("bulk", start, stop, success));
                if (iteration < warmupIterations) {
                    sampleRecorder.addSample(new Sample("bulk", start, start, stop, success));
                }
            }
        }
    }

@ -25,20 +25,20 @@ import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
import java.util.concurrent.TimeUnit;

public class SearchBenchmarkTask implements BenchmarkTask {
private static final long MICROS_PER_SEC = TimeUnit.SECONDS.toMicros(1L);
private static final long NANOS_PER_MICRO = TimeUnit.MICROSECONDS.toNanos(1L);

private final SearchRequestExecutor searchRequestExecutor;
private final String searchRequestBody;
private final int iterations;
private final int warmupIterations;
private final int measurementIterations;
private final int targetThroughput;

private SampleRecorder sampleRecorder;

public SearchBenchmarkTask(SearchRequestExecutor searchRequestExecutor, String body, int iterations, int targetThroughput) {
public SearchBenchmarkTask(SearchRequestExecutor searchRequestExecutor, String body, int warmupIterations,
int measurementIterations, int targetThroughput) {
this.searchRequestExecutor = searchRequestExecutor;
this.searchRequestBody = body;
this.iterations = iterations;
this.warmupIterations = warmupIterations;
this.measurementIterations = measurementIterations;
this.targetThroughput = targetThroughput;
}

@ -49,28 +49,25 @@ public class SearchBenchmarkTask implements BenchmarkTask {

@Override
public void run() throws Exception {
for (int iteration = 0; iteration < this.iterations; iteration++) {
final long start = System.nanoTime();
boolean success = searchRequestExecutor.search(searchRequestBody);
final long stop = System.nanoTime();
sampleRecorder.addSample(new Sample("search", start, stop, success));

int waitTime = (int) Math.floor(MICROS_PER_SEC / targetThroughput - (stop - start) / NANOS_PER_MICRO);
if (waitTime > 0) {
waitMicros(waitTime);
}
}
runIterations(warmupIterations, false);
runIterations(measurementIterations, true);
}

private void waitMicros(int waitTime) throws InterruptedException {
// Thread.sleep() time is not very accurate (it's most of the time around 1 - 2 ms off)
// we busy spin all the time to avoid introducing additional measurement artifacts (noticed 100% skew on 99.9th percentile)
// this approach is not suitable for low throughput rates (in the second range) though
if (waitTime > 0) {
long end = System.nanoTime() + 1000L * waitTime;
while (end > System.nanoTime()) {
private void runIterations(int iterations, boolean addSample) {
long interval = TimeUnit.SECONDS.toNanos(1L) / targetThroughput;

long totalStart = System.nanoTime();
for (int iteration = 0; iteration < iterations; iteration++) {
long expectedStart = totalStart + iteration * interval;
while (System.nanoTime() < expectedStart) {
// busy spin
}
long start = System.nanoTime();
boolean success = searchRequestExecutor.search(searchRequestBody);
long stop = System.nanoTime();
if (addSample) {
sampleRecorder.addSample(new Sample("search", expectedStart, start, stop, success));
}
}
}
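The rewritten `runIterations` replaces sleep-after-response pacing with a precomputed schedule: at a `targetThroughput` of 100 ops/s the interval is 10 ms, so iteration `i` is due at `totalStart + i * interval` regardless of how long earlier operations took. The following standalone sketch (not part of this commit; the 100 ops/s target and the 2 ms simulated operation are assumptions) shows the same scheduling idea:

```java
import java.util.concurrent.TimeUnit;

public class PacingSketch {
    public static void main(String[] args) throws Exception {
        long targetThroughput = 100; // assumed ops/s, as in the lead-in
        long interval = TimeUnit.SECONDS.toNanos(1L) / targetThroughput; // 10 ms

        long totalStart = System.nanoTime();
        for (int iteration = 0; iteration < 5; iteration++) {
            // The due time comes from the schedule, not from the previous response,
            // so a slow operation does not silently stretch the whole schedule.
            long expectedStart = totalStart + iteration * interval;
            while (System.nanoTime() < expectedStart) {
                // busy spin; Thread.sleep() is typically 1-2 ms off, which would
                // distort high percentiles at these time scales
            }
            long start = System.nanoTime();
            TimeUnit.MILLISECONDS.sleep(2); // stand-in for the search request
            long stop = System.nanoTime();
            System.out.printf("iteration %d: waited %.2f ms, service time %.2f ms%n",
                iteration, (start - expectedStart) / 1_000_000.0, (stop - start) / 1_000_000.0);
        }
    }
}
```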
@ -19,14 +19,20 @@
package org.elasticsearch.client.benchmark.rest;

import org.apache.http.HttpEntity;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpHost;
import org.apache.http.HttpStatus;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.message.BasicHeader;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.benchmark.AbstractBenchmark;
import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor;
import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor;
@ -45,7 +51,12 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {

@Override
protected RestClient client(String benchmarkTargetHost) {
return RestClient.builder(new HttpHost(benchmarkTargetHost, 9200)).build();
return RestClient
.builder(new HttpHost(benchmarkTargetHost, 9200))
.setHttpClientConfigCallback(b -> b.setDefaultHeaders(
Collections.singleton(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "gzip"))))
.setRequestConfigCallback(b -> b.setContentCompressionEnabled(true))
.build();
}

@Override
@ -77,7 +88,7 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
}
HttpEntity entity = new NStringEntity(bulkRequestBody.toString(), ContentType.APPLICATION_JSON);
try {
Response response = client.performRequest("POST", "/geonames/type/_bulk", Collections.emptyMap(), entity);
Response response = client.performRequest("POST", "/geonames/type/_noop_bulk", Collections.emptyMap(), entity);
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
} catch (Exception e) {
throw new ElasticsearchException(e);
@ -91,7 +102,7 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {

private RestSearchRequestExecutor(RestClient client, String indexName) {
this.client = client;
this.endpoint = "/" + indexName + "/_search";
this.endpoint = "/" + indexName + "/_noop_search";
}

@Override
@ -19,7 +19,6 @@
package org.elasticsearch.client.benchmark.transport;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchResponse;
@ -30,6 +29,11 @@ import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugin.noop.NoopPlugin;
import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction;
import org.elasticsearch.plugin.noop.action.bulk.NoopBulkRequestBuilder;
import org.elasticsearch.plugin.noop.action.search.NoopSearchAction;
import org.elasticsearch.plugin.noop.action.search.NoopSearchRequestBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

@ -46,7 +50,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportC

@Override
protected TransportClient client(String benchmarkTargetHost) throws Exception {
TransportClient client = new PreBuiltTransportClient(Settings.EMPTY);
TransportClient client = new PreBuiltTransportClient(Settings.EMPTY, NoopPlugin.class);
client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(benchmarkTargetHost), 9300));
return client;
}
@ -74,7 +78,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportC

@Override
public boolean bulkIndex(List<String> bulkData) {
BulkRequestBuilder builder = client.prepareBulk();
NoopBulkRequestBuilder builder = NoopBulkAction.INSTANCE.newRequestBuilder(client);
for (String bulkItem : bulkData) {
builder.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8)));
}
@ -103,8 +107,11 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportC
@Override
public boolean search(String source) {
final SearchResponse response;
NoopSearchRequestBuilder builder = NoopSearchAction.INSTANCE.newRequestBuilder(client);
try {
response = client.prepareSearch(indexName).setQuery(QueryBuilders.wrapperQuery(source)).execute().get();
builder.setIndices(indexName);
builder.setQuery(QueryBuilders.wrapperQuery(source));
response = client.execute(NoopSearchAction.INSTANCE, builder.request()).get();
return response.status() == RestStatus.OK;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
@ -1,9 +0,0 @@
es.logger.level=INFO
log4j.rootLogger=${es.logger.level}, out

log4j.logger.org.apache.http=INFO, out
log4j.additivity.org.apache.http=false

log4j.appender.out=org.apache.log4j.ConsoleAppender
log4j.appender.out.layout=org.apache.log4j.PatternLayout
log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n
7
client/benchmark/src/main/resources/log4j2.properties
Normal file
@ -0,0 +1,7 @@
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n

rootLogger.level = info
rootLogger.appenderRef.console.ref = console
23
client/client-benchmark-noop-api-plugin/README.md
Normal file
@ -0,0 +1,23 @@
### Purpose

This plugin provides empty REST and transport endpoints for bulk indexing and search. It is used to avoid accidental server-side bottlenecks in client-side benchmarking.

### Build Instructions

Build the plugin with `gradle :client:client-benchmark-noop-api-plugin:assemble` from the Elasticsearch root project directory.

### Installation Instructions

After the binary has been built, install it with `bin/elasticsearch-plugin install file:///full/path/to/noop-plugin.zip`.

### Usage

The plugin provides two REST endpoints:

* `/_noop_bulk` and all variations that the bulk endpoint provides (except that all no-op endpoints are called `_noop_bulk` instead of `_bulk`)
* `/_noop_search` and all variations that the search endpoint provides (except that all no-op endpoints are called `_noop_search` instead of `_search`)

The corresponding transport actions are:

* `org.elasticsearch.plugin.noop.action.bulk.TransportNoopBulkAction`
* `org.elasticsearch.plugin.noop.action.search.TransportNoopSearchAction`
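As a rough usage sketch (not part of the README above; host, port, index, and document body are assumptions), the low-level REST client can exercise the no-op bulk endpoint exactly like the real `_bulk` endpoint:

```java
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.util.Collections;

public class NoopBulkExample {
    public static void main(String[] args) throws Exception {
        // Assumes a node with the plugin installed is listening on localhost:9200.
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            // One minimal bulk action in the usual newline-delimited format.
            String body = "{\"index\":{}}\n{\"location\":[13.4,52.5]}\n";
            NStringEntity entity = new NStringEntity(body, ContentType.APPLICATION_JSON);
            // Same request shape as /_bulk, but the server does no indexing work.
            Response response = client.performRequest(
                "POST", "/geonames/type/_noop_bulk", Collections.emptyMap(), entity);
            System.out.println(response.getStatusLine());
        }
    }
}
```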
@ -1,5 +1,3 @@
package org.elasticsearch.mapper.attachments;

/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
@ -19,12 +17,19 @@ package org.elasticsearch.mapper.attachments;
* under the License.
*/

import org.elasticsearch.test.ESTestCase;

public class TikaImplTests extends ESTestCase {

public void testTikaLoads() throws Exception {
Class.forName("org.elasticsearch.mapper.attachments.TikaImpl");
}
}

group = 'org.elasticsearch.plugin'

apply plugin: 'elasticsearch.esplugin'

esplugin {
name 'client-benchmark-noop-api'
description 'Stubbed out Elasticsearch actions that can be used for client-side benchmarking'
classname 'org.elasticsearch.plugin.noop.NoopPlugin'
}

compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"

// no unit tests
test.enabled = false
integTest.enabled = false
@ -0,0 +1,49 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop;

import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction;
import org.elasticsearch.plugin.noop.action.bulk.RestNoopBulkAction;
import org.elasticsearch.plugin.noop.action.bulk.TransportNoopBulkAction;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.plugin.noop.action.search.NoopSearchAction;
import org.elasticsearch.plugin.noop.action.search.RestNoopSearchAction;
import org.elasticsearch.plugin.noop.action.search.TransportNoopSearchAction;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestHandler;

import java.util.Arrays;
import java.util.List;

public class NoopPlugin extends Plugin implements ActionPlugin {
@Override
public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
return Arrays.asList(
new ActionHandler<>(NoopBulkAction.INSTANCE, TransportNoopBulkAction.class),
new ActionHandler<>(NoopSearchAction.INSTANCE, TransportNoopSearchAction.class)
);
}

@Override
public List<Class<? extends RestHandler>> getRestHandlers() {
return Arrays.asList(RestNoopBulkAction.class, RestNoopSearchAction.class);
}
}
@ -16,29 +16,29 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.bulk;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class NoopBulkAction extends Action<BulkRequest, BulkResponse, NoopBulkRequestBuilder> {
public static final String NAME = "mock:data/write/bulk";

public static final NoopBulkAction INSTANCE = new NoopBulkAction();

private NoopBulkAction() {
super(NAME);
}

@Override
public NoopBulkRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new NoopBulkRequestBuilder(client, this);
}

@Override
public BulkResponse newResponse() {
return new BulkResponse(null, 0);
}
}

package org.elasticsearch.common.logging;

import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;
import org.elasticsearch.cli.Terminal;

/**
* TerminalAppender logs events to Terminal.DEFAULT. It is used for example by the PluginCli.
*/
public class TerminalAppender extends AppenderSkeleton {
@Override
protected void append(LoggingEvent event) {
Terminal.DEFAULT.println(event.getRenderedMessage());
}

@Override
public void close() {
}

@Override
public boolean requiresLayout() {
return false;
}
}
@ -0,0 +1,153 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.bulk;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteRequestBuilder;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;

public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse, NoopBulkRequestBuilder>
implements WriteRequestBuilder<NoopBulkRequestBuilder> {

public NoopBulkRequestBuilder(ElasticsearchClient client, NoopBulkAction action) {
super(client, action, new BulkRequest());
}

/**
* Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest}
* (for example, if no id is provided, one will be generated, or usage of the create flag).
*/
public NoopBulkRequestBuilder add(IndexRequest request) {
super.request.add(request);
return this;
}

/**
* Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest}
* (for example, if no id is provided, one will be generated, or usage of the create flag).
*/
public NoopBulkRequestBuilder add(IndexRequestBuilder request) {
super.request.add(request.request());
return this;
}

/**
* Adds a {@link DeleteRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(DeleteRequest request) {
super.request.add(request);
return this;
}

/**
* Adds a {@link DeleteRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(DeleteRequestBuilder request) {
super.request.add(request.request());
return this;
}

/**
* Adds an {@link UpdateRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(UpdateRequest request) {
super.request.add(request);
return this;
}

/**
* Adds an {@link UpdateRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(UpdateRequestBuilder request) {
super.request.add(request.request());
return this;
}

/**
* Adds framed data in binary format.
*/
public NoopBulkRequestBuilder add(byte[] data, int from, int length) throws Exception {
request.add(data, from, length, null, null);
return this;
}

/**
* Adds framed data in binary format.
*/
public NoopBulkRequestBuilder add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType)
throws Exception {
request.add(data, from, length, defaultIndex, defaultType);
return this;
}

/**
* Sets the number of shard copies that must be active before proceeding with the write.
* See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
*/
public NoopBulkRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
request.waitForActiveShards(waitForActiveShards);
return this;
}

/**
* A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical
* shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
* to get the ActiveShardCount.
*/
public NoopBulkRequestBuilder setWaitForActiveShards(final int waitForActiveShards) {
return setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards));
}

/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
public final NoopBulkRequestBuilder setTimeout(TimeValue timeout) {
request.timeout(timeout);
return this;
}

/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
public final NoopBulkRequestBuilder setTimeout(String timeout) {
request.timeout(timeout);
return this;
}

/**
* The number of actions currently in the bulk.
*/
public int numberOfActions() {
return request.numberOfActions();
}
}
@ -0,0 +1,118 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.bulk;

import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.RestBuilderListener;

import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT;
import static org.elasticsearch.rest.RestStatus.OK;

public class RestNoopBulkAction extends BaseRestHandler {
@Inject
public RestNoopBulkAction(Settings settings, RestController controller) {
super(settings);

controller.registerHandler(POST, "/_noop_bulk", this);
controller.registerHandler(PUT, "/_noop_bulk", this);
controller.registerHandler(POST, "/{index}/_noop_bulk", this);
controller.registerHandler(PUT, "/{index}/_noop_bulk", this);
controller.registerHandler(POST, "/{index}/{type}/_noop_bulk", this);
controller.registerHandler(PUT, "/{index}/{type}/_noop_bulk", this);
}

@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
BulkRequest bulkRequest = Requests.bulkRequest();
String defaultIndex = request.param("index");
String defaultType = request.param("type");
String defaultRouting = request.param("routing");
String fieldsParam = request.param("fields");
String defaultPipeline = request.param("pipeline");
String[] defaultFields = fieldsParam != null ? Strings.commaDelimitedListToStringArray(fieldsParam) : null;

String waitForActiveShards = request.param("wait_for_active_shards");
if (waitForActiveShards != null) {
bulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
}
bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
bulkRequest.setRefreshPolicy(request.param("refresh"));
bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, null, defaultPipeline, null, true);

// short circuit the call to the transport layer
BulkRestBuilderListener listener = new BulkRestBuilderListener(channel, request);
listener.onResponse(bulkRequest);
}

private static class BulkRestBuilderListener extends RestBuilderListener<BulkRequest> {
private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentRequest.OpType.UPDATE,
new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));

private final RestRequest request;

public BulkRestBuilderListener(RestChannel channel, RestRequest request) {
super(channel);
this.request = request;
}

@Override
public RestResponse buildResponse(BulkRequest bulkRequest, XContentBuilder builder) throws Exception {
builder.startObject();
builder.field(Fields.TOOK, 0);
builder.field(Fields.ERRORS, false);
builder.startArray(Fields.ITEMS);
for (int idx = 0; idx < bulkRequest.numberOfActions(); idx++) {
builder.startObject();
ITEM_RESPONSE.toXContent(builder, request);
builder.endObject();
}
builder.endArray();
builder.endObject();
return new BytesRestResponse(OK, builder);
}
}

static final class Fields {
static final String ITEMS = "items";
static final String ERRORS = "errors";
static final String TOOK = "took";
}
}
@ -0,0 +1,57 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.bulk;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.DocumentRequest;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

public class TransportNoopBulkAction extends HandledTransportAction<BulkRequest, BulkResponse> {
private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, DocumentRequest.OpType.UPDATE,
new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));

@Inject
public TransportNoopBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NoopBulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
}

@Override
protected void doExecute(BulkRequest request, ActionListener<BulkResponse> listener) {
final int itemCount = request.subRequests().size();
// simulate at least a realistic amount of data that gets serialized
BulkItemResponse[] bulkItemResponses = new BulkItemResponse[itemCount];
for (int idx = 0; idx < itemCount; idx++) {
bulkItemResponses[idx] = ITEM_RESPONSE;
}
listener.onResponse(new BulkResponse(bulkItemResponses, 0));
}
}
@ -0,0 +1,43 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.search;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.ElasticsearchClient;

public class NoopSearchAction extends Action<SearchRequest, SearchResponse, NoopSearchRequestBuilder> {
public static final NoopSearchAction INSTANCE = new NoopSearchAction();
public static final String NAME = "mock:data/read/search";

public NoopSearchAction() {
super(NAME);
}

@Override
public NoopSearchRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new NoopSearchRequestBuilder(client, this);
}

@Override
public SearchResponse newResponse() {
return new SearchResponse();
}
}
@ -0,0 +1,496 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.search;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.SuggestBuilder;

import java.util.Arrays;
import java.util.List;

public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse, NoopSearchRequestBuilder> {

public NoopSearchRequestBuilder(ElasticsearchClient client, NoopSearchAction action) {
super(client, action, new SearchRequest());
}

/**
* Sets the indices the search will be executed on.
*/
public NoopSearchRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}

/**
* The document types to execute the search against. Defaults to be executed against
* all types.
*/
public NoopSearchRequestBuilder setTypes(String... types) {
request.types(types);
return this;
}

/**
* The search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}.
*/
public NoopSearchRequestBuilder setSearchType(SearchType searchType) {
request.searchType(searchType);
return this;
}

/**
* A string representation of the search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}. Can be
* one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
* "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
*/
public NoopSearchRequestBuilder setSearchType(String searchType) {
request.searchType(searchType);
return this;
}

/**
* If set, will enable scrolling of the search request.
*/
public NoopSearchRequestBuilder setScroll(Scroll scroll) {
request.scroll(scroll);
return this;
}

/**
* If set, will enable scrolling of the search request for the specified timeout.
*/
public NoopSearchRequestBuilder setScroll(TimeValue keepAlive) {
request.scroll(keepAlive);
return this;
}

/**
* If set, will enable scrolling of the search request for the specified timeout.
*/
public NoopSearchRequestBuilder setScroll(String keepAlive) {
request.scroll(keepAlive);
return this;
}

/**
* An optional timeout to control how long search is allowed to take.
*/
public NoopSearchRequestBuilder setTimeout(TimeValue timeout) {
sourceBuilder().timeout(timeout);
return this;
}

/**
* An optional document count, upon collecting which the search
* query will terminate early.
*/
public NoopSearchRequestBuilder setTerminateAfter(int terminateAfter) {
sourceBuilder().terminateAfter(terminateAfter);
return this;
}

/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public NoopSearchRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}

/**
* The routing values to control the shards that the search will be executed on.
*/
public NoopSearchRequestBuilder setRouting(String... routing) {
request.routing(routing);
return this;
}

/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public NoopSearchRequestBuilder setPreference(String preference) {
request.preference(preference);
return this;
}

/**
* Specifies what type of requested indices to ignore and how to handle wildcard indices expressions.
* <p>
* For example indices that don't exist.
*/
public NoopSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request().indicesOptions(indicesOptions);
return this;
}

/**
* Constructs a new search source builder with a search query.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public NoopSearchRequestBuilder setQuery(QueryBuilder queryBuilder) {
sourceBuilder().query(queryBuilder);
return this;
}

/**
* Sets a filter that will be executed after the query has been executed and only has effect on the search hits
* (not aggregations). This filter is always executed as the last filtering mechanism.
*/
public NoopSearchRequestBuilder setPostFilter(QueryBuilder postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}

/**
* Sets the minimum score below which docs will be filtered out.
*/
public NoopSearchRequestBuilder setMinScore(float minScore) {
sourceBuilder().minScore(minScore);
return this;
}

/**
* From index to start the search from. Defaults to <tt>0</tt>.
*/
public NoopSearchRequestBuilder setFrom(int from) {
sourceBuilder().from(from);
return this;
}

/**
* The number of search hits to return. Defaults to <tt>10</tt>.
*/
public NoopSearchRequestBuilder setSize(int size) {
sourceBuilder().size(size);
return this;
}

/**
* Should each {@link org.elasticsearch.search.SearchHit} be returned with an
* explanation of the hit (ranking).
*/
public NoopSearchRequestBuilder setExplain(boolean explain) {
sourceBuilder().explain(explain);
return this;
}

/**
* Should each {@link org.elasticsearch.search.SearchHit} be returned with its
* version.
*/
public NoopSearchRequestBuilder setVersion(boolean version) {
sourceBuilder().version(version);
return this;
}

/**
* Sets the boost a specific index will receive when the query is executed against it.
*
* @param index The index to apply the boost against
* @param indexBoost The boost to apply to the index
*/
public NoopSearchRequestBuilder addIndexBoost(String index, float indexBoost) {
sourceBuilder().indexBoost(index, indexBoost);
return this;
}

/**
* The stats groups this request will be aggregated under.
*/
public NoopSearchRequestBuilder setStats(String... statsGroups) {
sourceBuilder().stats(Arrays.asList(statsGroups));
return this;
}

/**
* The stats groups this request will be aggregated under.
*/
public NoopSearchRequestBuilder setStats(List<String> statsGroups) {
sourceBuilder().stats(statsGroups);
return this;
}

/**
* Indicates whether the response should contain the stored _source for every hit
*/
public NoopSearchRequestBuilder setFetchSource(boolean fetch) {
sourceBuilder().fetchSource(fetch);
return this;
}

/**
* Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param include An optional include (optionally wildcarded) pattern to filter the returned _source
* @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
*/
public NoopSearchRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
sourceBuilder().fetchSource(include, exclude);
return this;
}

/**
* Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param includes An optional list of include (optionally wildcarded) patterns to filter the returned _source
* @param excludes An optional list of exclude (optionally wildcarded) patterns to filter the returned _source
*/
public NoopSearchRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
sourceBuilder().fetchSource(includes, excludes);
return this;
}

/**
* Adds a docvalue based field to load and return. The field does not have to be stored,
* but it's recommended to use non-analyzed or numeric fields.
*
* @param name The field to get from the docvalue
*/
public NoopSearchRequestBuilder addDocValueField(String name) {
sourceBuilder().docValueField(name);
return this;
}

/**
* Adds a stored field to load and return (note, it must be stored) as part of the search request.
* If none are specified, the source of the document will be returned.
*/
public NoopSearchRequestBuilder addStoredField(String field) {
sourceBuilder().storedField(field);
return this;
}

/**
* Adds a script based field to load and return. The field does not have to be stored,
* but it's recommended to use non-analyzed or numeric fields.
*
* @param name The name that will represent this value in the return hit
* @param script The script to use
*/
public NoopSearchRequestBuilder addScriptField(String name, Script script) {
sourceBuilder().scriptField(name, script);
return this;
}

/**
* Adds a sort against the given field name and the sort ordering.
*
* @param field The name of the field
* @param order The sort ordering
*/
public NoopSearchRequestBuilder addSort(String field, SortOrder order) {
sourceBuilder().sort(field, order);
return this;
}

/**
* Adds a generic sort builder.
*
* @see org.elasticsearch.search.sort.SortBuilders
*/
public NoopSearchRequestBuilder addSort(SortBuilder sort) {
sourceBuilder().sort(sort);
return this;
}

/**
* Set the sort values that indicates which docs this request should "search after".
*/
public NoopSearchRequestBuilder searchAfter(Object[] values) {
sourceBuilder().searchAfter(values);
return this;
}

public NoopSearchRequestBuilder slice(SliceBuilder builder) {
sourceBuilder().slice(builder);
return this;
}

/**
* Applies when sorting, and controls if scores will be tracked as well. Defaults to
* <tt>false</tt>.
*/
public NoopSearchRequestBuilder setTrackScores(boolean trackScores) {
sourceBuilder().trackScores(trackScores);
return this;
}

/**
* Sets the fields to load and return as part of the search request. If none
* are specified, the source of the document will be returned.
*/
public NoopSearchRequestBuilder storedFields(String... fields) {
sourceBuilder().storedFields(Arrays.asList(fields));
return this;
}

/**
* Adds an aggregation to the search operation.
*/
public NoopSearchRequestBuilder addAggregation(AggregationBuilder aggregation) {
sourceBuilder().aggregation(aggregation);
return this;
}

/**
* Adds an aggregation to the search operation.
*/
public NoopSearchRequestBuilder addAggregation(PipelineAggregationBuilder aggregation) {
sourceBuilder().aggregation(aggregation);
return this;
}

public NoopSearchRequestBuilder highlighter(HighlightBuilder highlightBuilder) {
sourceBuilder().highlighter(highlightBuilder);
return this;
}

/**
* Delegates to {@link org.elasticsearch.search.builder.SearchSourceBuilder#suggest(SuggestBuilder)}
*/
public NoopSearchRequestBuilder suggest(SuggestBuilder suggestBuilder) {
sourceBuilder().suggest(suggestBuilder);
return this;
}

/**
* Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
* {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
*
* @param rescorer rescorer configuration
* @return this for chaining
*/
public NoopSearchRequestBuilder setRescorer(RescoreBuilder<?> rescorer) {
sourceBuilder().clearRescorers();
return addRescorer(rescorer);
}

/**
* Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
* {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
*
* @param rescorer rescorer configuration
* @param window rescore window
* @return this for chaining
*/
public NoopSearchRequestBuilder setRescorer(RescoreBuilder rescorer, int window) {
sourceBuilder().clearRescorers();
return addRescorer(rescorer.windowSize(window));
}

/**
* Adds a new rescorer.
*
* @param rescorer rescorer configuration
* @return this for chaining
*/
public NoopSearchRequestBuilder addRescorer(RescoreBuilder<?> rescorer) {
sourceBuilder().addRescorer(rescorer);
return this;
}

/**
* Adds a new rescorer.
*
* @param rescorer rescorer configuration
* @param window rescore window
* @return this for chaining
*/
public NoopSearchRequestBuilder addRescorer(RescoreBuilder<?> rescorer, int window) {
sourceBuilder().addRescorer(rescorer.windowSize(window));
return this;
}

/**
* Clears all rescorers from the builder.
*
* @return this for chaining
*/
public NoopSearchRequestBuilder clearRescorers() {
sourceBuilder().clearRescorers();
return this;
}

/**
* Sets the source of the request as a SearchSourceBuilder.
*/
public NoopSearchRequestBuilder setSource(SearchSourceBuilder source) {
request.source(source);
return this;
}

/**
* Sets if this request should use the request cache or not, assuming that it can (for
* example, if "now" is used, it will never be cached). By default (not set, or null)
* it falls back to the index level setting that controls whether the request cache is enabled.
*/
public NoopSearchRequestBuilder setRequestCache(Boolean requestCache) {
request.requestCache(requestCache);
return this;
}

/**
* Should the query be profiled. Defaults to <code>false</code>
*/
public NoopSearchRequestBuilder setProfile(boolean profile) {
sourceBuilder().profile(profile);
return this;
}

@Override
public String toString() {
if (request.source() != null) {
return request.source().toString();
}
return new SearchSourceBuilder().toString();
}

private SearchSourceBuilder sourceBuilder() {
if (request.source() == null) {
request.source(new SearchSourceBuilder());
}
return request.source();
}
}
@ -0,0 +1,54 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.search;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestStatusToXContentListener;

import java.io.IOException;

import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.POST;

public class RestNoopSearchAction extends BaseRestHandler {

@Inject
public RestNoopSearchAction(Settings settings, RestController controller) {
super(settings);
controller.registerHandler(GET, "/_noop_search", this);
controller.registerHandler(POST, "/_noop_search", this);
controller.registerHandler(GET, "/{index}/_noop_search", this);
controller.registerHandler(POST, "/{index}/_noop_search", this);
controller.registerHandler(GET, "/{index}/{type}/_noop_search", this);
controller.registerHandler(POST, "/{index}/{type}/_noop_search", this);
}

@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws IOException {
SearchRequest searchRequest = new SearchRequest();
client.execute(NoopSearchAction.INSTANCE, searchRequest, new RestStatusToXContentListener<>(channel));
}
}
@ -0,0 +1,58 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Collections;

public class TransportNoopSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
@Inject
public TransportNoopSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters
actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NoopSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
SearchRequest::new);
}

@Override
protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
listener.onResponse(new SearchResponse(new InternalSearchResponse(
new InternalSearchHits(
new InternalSearchHit[0], 0L, 0.0f),
new InternalAggregations(Collections.emptyList()),
new Suggest(Collections.emptyList()),
new SearchProfileShardResults(Collections.emptyMap()), false, false), "", 1, 1, 0, new ShardSearchFailure[0]));
}
}
@ -32,7 +32,7 @@ import java.util.Objects;
 * Holds an elasticsearch response. It wraps the {@link HttpResponse} returned and associates it with
 * its corresponding {@link RequestLine} and {@link HttpHost}.
 */
public final class Response {
public class Response {

    private final RequestLine requestLine;
    private final HttpHost host;
@ -33,7 +33,7 @@ public final class ResponseException extends IOException {

    private Response response;

    ResponseException(Response response) throws IOException {
    public ResponseException(Response response) throws IOException {
        super(buildMessage(response));
        this.response = response;
    }
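With the constructor now public, callers outside the package can construct the exception, and its wrapped HTTP response stays reachable via getResponse(); a minimal sketch, assuming a restClient instance is in scope and the endpoint name is made up:

    try {
        restClient.performRequest("GET", "/index-that-does-not-exist");  // assumed endpoint
    } catch (ResponseException e) {
        // Elasticsearch answered, but with an error status code; the response is preserved.
        int status = e.getResponse().getStatusLine().getStatusCode();
    }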
@ -65,19 +65,23 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Client that connects to an elasticsearch cluster through http.
 * Client that connects to an Elasticsearch cluster through HTTP.
 * <p>
 * Must be created using {@link RestClientBuilder}, which allows to set all the different options or just rely on defaults.
 * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later
 * by calling {@link #setHosts(HttpHost...)}.
 * <p>
 * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When
 * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and
 * retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously
 * failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that
 * deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown.
 *
 * <p>
 * Requests can be either synchronous or asynchronous. The asynchronous variants all end with {@code Async}.
 * <p>
 * Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format.
 */
public final class RestClient implements Closeable {
public class RestClient implements Closeable {

    private static final Log logger = LogFactory.getLog(RestClient.class);

@ -85,17 +89,19 @@ public final class RestClient implements Closeable {
    //we don't rely on default headers supported by HttpAsyncClient as those cannot be replaced
    private final Header[] defaultHeaders;
    private final long maxRetryTimeoutMillis;
    private final String pathPrefix;
    private final AtomicInteger lastHostIndex = new AtomicInteger(0);
    private volatile Set<HttpHost> hosts;
    private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
    private final FailureListener failureListener;

    RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
               HttpHost[] hosts, FailureListener failureListener) {
               HttpHost[] hosts, String pathPrefix, FailureListener failureListener) {
        this.client = client;
        this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
        this.defaultHeaders = defaultHeaders;
        this.failureListener = failureListener;
        this.pathPrefix = pathPrefix;
        setHosts(hosts);
    }

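A minimal lifecycle sketch for the class documented above (build, request, close); the hosts and the default header are assumptions for illustration:

    RestClient client = RestClient.builder(new HttpHost("localhost", 9200), new HttpHost("localhost", 9201))
            .setDefaultHeaders(new Header[]{new BasicHeader("X-App", "example")})  // assumed header
            .build();
    try {
        Response response = client.performRequest("GET", "/");  // round-robins over the two hosts
    } finally {
        client.close();  // RestClient implements Closeable
    }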
@ -124,41 +130,41 @@ public final class RestClient implements Closeable {
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters
     * and request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param headers the optional request headers
     * @return the response returned by elasticsearch
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
        return performRequest(method, endpoint, Collections.<String, String>emptyMap(), (HttpEntity)null, headers);
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param params the query_string parameters
     * @param headers the optional request headers
     * @return the response returned by elasticsearch
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
        return performRequest(method, endpoint, params, (HttpEntity)null, headers);
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
     * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
     * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, Header...)}
     * which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance, {@link HeapBufferedAsyncResponseConsumer}
     * will be used to consume the response body.
@ -168,10 +174,10 @@ public final class RestClient implements Closeable {
     * @param params the query_string parameters
     * @param entity the body of the request, null if not applicable
     * @param headers the optional request headers
     * @return the response returned by elasticsearch
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(String method, String endpoint, Map<String, String> params,
                                   HttpEntity entity, Header... headers) throws IOException {
@ -180,7 +186,7 @@ public final class RestClient implements Closeable {
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to. Blocks until the request is completed and returns
     * Sends a request to the Elasticsearch cluster that the client points to. Blocks until the request is completed and returns
     * its response or fails by throwing an exception. Selects a host out of the provided ones in a round-robin fashion. Failing hosts
     * are marked dead and retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times
     * they previously failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead
@ -193,37 +199,37 @@ public final class RestClient implements Closeable {
     * @param responseConsumer the {@link HttpAsyncResponseConsumer} callback. Controls how the response
     *                         body gets streamed from a non-blocking HTTP connection on the client side.
     * @param headers the optional request headers
     * @return the response returned by elasticsearch
     * @return the response returned by Elasticsearch
     * @throws IOException in case of a problem or the connection was aborted
     * @throws ClientProtocolException in case of an http protocol error
     * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
     * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
     */
    public Response performRequest(String method, String endpoint, Map<String, String> params,
                                   HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                                   Header... headers) throws IOException {
        SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis);
        performRequest(method, endpoint, params, entity, responseConsumer, listener, headers);
        performRequestAsync(method, endpoint, params, entity, responseConsumer, listener, headers);
        return listener.get();
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
     * {@link #performRequest(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body.
     * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     */
    public void performRequest(String method, String endpoint, ResponseListener responseListener, Header... headers) {
        performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, responseListener, headers);
    public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) {
        performRequestAsync(method, endpoint, Collections.<String, String>emptyMap(), null, responseListener, headers);
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
     * {@link #performRequest(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body.
     * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body.
     *
     * @param method the http method
     * @param endpoint the path of the request (without host and port)
@ -231,15 +237,15 @@ public final class RestClient implements Closeable {
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     */
    public void performRequest(String method, String endpoint, Map<String, String> params,
                               ResponseListener responseListener, Header... headers) {
        performRequest(method, endpoint, params, null, responseListener, headers);
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    ResponseListener responseListener, Header... headers) {
        performRequestAsync(method, endpoint, params, null, responseListener, headers);
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead
     * the provided {@link ResponseListener} will be notified upon completion or failure.
     * Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, ResponseListener, Header...)}
     * Shortcut to {@link #performRequestAsync(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, ResponseListener, Header...)}
     * which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance, {@link HeapBufferedAsyncResponseConsumer}
     * will be used to consume the response body.
     *
@ -250,14 +256,14 @@ public final class RestClient implements Closeable {
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     */
    public void performRequest(String method, String endpoint, Map<String, String> params,
                               HttpEntity entity, ResponseListener responseListener, Header... headers) {
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    HttpEntity entity, ResponseListener responseListener, Header... headers) {
        HttpAsyncResponseConsumer<HttpResponse> responseConsumer = new HeapBufferedAsyncResponseConsumer();
        performRequest(method, endpoint, params, entity, responseConsumer, responseListener, headers);
        performRequestAsync(method, endpoint, params, entity, responseConsumer, responseListener, headers);
    }

    /**
     * Sends a request to the elasticsearch cluster that the client points to. The request is executed asynchronously
     * Sends a request to the Elasticsearch cluster that the client points to. The request is executed asynchronously
     * and the provided {@link ResponseListener} gets notified upon request completion or failure.
     * Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
     * amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
@ -273,20 +279,20 @@ public final class RestClient implements Closeable {
     * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
     * @param headers the optional request headers
     */
    public void performRequest(String method, String endpoint, Map<String, String> params,
                               HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                               ResponseListener responseListener, Header... headers) {
        URI uri = buildUri(endpoint, params);
    public void performRequestAsync(String method, String endpoint, Map<String, String> params,
                                    HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                                    ResponseListener responseListener, Header... headers) {
        URI uri = buildUri(pathPrefix, endpoint, params);
        HttpRequestBase request = createHttpRequest(method, uri, entity);
        setHeaders(request, headers);
        FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
        long startTime = System.nanoTime();
        performRequest(startTime, nextHost().iterator(), request, responseConsumer, failureTrackingResponseListener);
        performRequestAsync(startTime, nextHost().iterator(), request, responseConsumer, failureTrackingResponseListener);
    }

    private void performRequest(final long startTime, final Iterator<HttpHost> hosts, final HttpRequestBase request,
                                final HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                                final FailureTrackingResponseListener listener) {
    private void performRequestAsync(final long startTime, final Iterator<HttpHost> hosts, final HttpRequestBase request,
                                     final HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                                     final FailureTrackingResponseListener listener) {
        final HttpHost host = hosts.next();
        //we stream the request body if the entity allows for it
        HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request);
@ -340,7 +346,7 @@ public final class RestClient implements Closeable {
                } else {
                    listener.trackFailure(exception);
                    request.reset();
                    performRequest(startTime, hosts, request, responseConsumer, listener);
                    performRequestAsync(startTime, hosts, request, responseConsumer, listener);
                }
            } else {
                listener.onDefinitiveFailure(exception);
@ -356,12 +362,17 @@ public final class RestClient implements Closeable {

    private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) {
        Objects.requireNonNull(requestHeaders, "request headers must not be null");
        for (Header defaultHeader : defaultHeaders) {
            httpRequest.setHeader(defaultHeader);
        }
        // request headers override default headers, so we don't add default headers if they exist as request headers
        final Set<String> requestNames = new HashSet<>(requestHeaders.length);
        for (Header requestHeader : requestHeaders) {
            Objects.requireNonNull(requestHeader, "request header must not be null");
            httpRequest.setHeader(requestHeader);
            httpRequest.addHeader(requestHeader);
            requestNames.add(requestHeader.getName());
        }
        for (Header defaultHeader : defaultHeaders) {
            if (requestNames.contains(defaultHeader.getName()) == false) {
                httpRequest.addHeader(defaultHeader);
            }
        }
    }

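To illustrate the precedence rule implemented above: request-level headers suppress same-named defaults, while repeated request headers are all sent because addHeader (rather than setHeader) is used. A sketch with assumed header names, given a client built with a default "X-Team: infra" header:

    Response response = client.performRequest("GET", "/", Collections.<String, String>emptyMap(),
            new BasicHeader("X-Team", "search"), new BasicHeader("X-Team", "core"));
    // The request carries X-Team: search and X-Team: core; the default X-Team: infra is dropped
    // because a request-level header with the same name exists.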
@ -497,10 +508,21 @@ public final class RestClient implements Closeable {
        return httpRequest;
    }

    private static URI buildUri(String path, Map<String, String> params) {
    private static URI buildUri(String pathPrefix, String path, Map<String, String> params) {
        Objects.requireNonNull(params, "params must not be null");
        try {
            URIBuilder uriBuilder = new URIBuilder(path);
            String fullPath;
            if (pathPrefix != null) {
                if (path.startsWith("/")) {
                    fullPath = pathPrefix + path;
                } else {
                    fullPath = pathPrefix + "/" + path;
                }
            } else {
                fullPath = path;
            }

            URIBuilder uriBuilder = new URIBuilder(fullPath);
            for (Map.Entry<String, String> param : params.entrySet()) {
                uriBuilder.addParameter(param.getKey(), param.getValue());
            }
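The concatenation above makes the resulting path independent of whether the endpoint carries a leading slash; a worked example with assumed values:

    // pathPrefix = "/base/path"
    // buildUri(pathPrefix, "/_search", params) -> "/base/path/_search"
    // buildUri(pathPrefix, "_search", params)  -> "/base/path/_search"
    // buildUri(null, "/_search", params)       -> "/_search"  (no prefix configured)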
@ -51,12 +51,17 @@ public final class RestClientBuilder {
    private RestClient.FailureListener failureListener;
    private HttpClientConfigCallback httpClientConfigCallback;
    private RequestConfigCallback requestConfigCallback;
    private String pathPrefix;

    /**
     * Creates a new builder instance and sets the hosts that the client will send requests to.
     *
     * @throws NullPointerException if {@code hosts} or any host is {@code null}.
     * @throws IllegalArgumentException if {@code hosts} is empty.
     */
    RestClientBuilder(HttpHost... hosts) {
        if (hosts == null || hosts.length == 0) {
        Objects.requireNonNull(hosts, "hosts must not be null");
        if (hosts.length == 0) {
            throw new IllegalArgumentException("no hosts provided");
        }
        for (HttpHost host : hosts) {
@ -66,7 +71,11 @@ public final class RestClientBuilder {
    }

    /**
     * Sets the default request headers, which will be sent along with each request
     * Sets the default request headers, which will be sent along with each request.
     * <p>
     * Request-time headers will always overwrite any default headers.
     *
     * @throws NullPointerException if {@code defaultHeaders} or any header is {@code null}.
     */
    public RestClientBuilder setDefaultHeaders(Header[] defaultHeaders) {
        Objects.requireNonNull(defaultHeaders, "defaultHeaders must not be null");
@ -79,6 +88,8 @@ public final class RestClientBuilder {

    /**
     * Sets the {@link RestClient.FailureListener} to be notified for each request failure
     *
     * @throws NullPointerException if {@code failureListener} is {@code null}.
     */
    public RestClientBuilder setFailureListener(RestClient.FailureListener failureListener) {
        Objects.requireNonNull(failureListener, "failureListener must not be null");
@ -90,7 +101,7 @@ public final class RestClientBuilder {
     * Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
     * {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
     *
     * @throws IllegalArgumentException if maxRetryTimeoutMillis is not greater than 0
     * @throws IllegalArgumentException if {@code maxRetryTimeoutMillis} is not greater than 0
     */
    public RestClientBuilder setMaxRetryTimeoutMillis(int maxRetryTimeoutMillis) {
        if (maxRetryTimeoutMillis <= 0) {
@ -102,6 +113,8 @@ public final class RestClientBuilder {

    /**
     * Sets the {@link HttpClientConfigCallback} to be used to customize http client configuration
     *
     * @throws NullPointerException if {@code httpClientConfigCallback} is {@code null}.
     */
    public RestClientBuilder setHttpClientConfigCallback(HttpClientConfigCallback httpClientConfigCallback) {
        Objects.requireNonNull(httpClientConfigCallback, "httpClientConfigCallback must not be null");
@ -111,6 +124,8 @@ public final class RestClientBuilder {

    /**
     * Sets the {@link RequestConfigCallback} to be used to customize http client configuration
     *
     * @throws NullPointerException if {@code requestConfigCallback} is {@code null}.
     */
    public RestClientBuilder setRequestConfigCallback(RequestConfigCallback requestConfigCallback) {
        Objects.requireNonNull(requestConfigCallback, "requestConfigCallback must not be null");
@ -118,6 +133,43 @@ public final class RestClientBuilder {
        return this;
    }

    /**
     * Sets the path's prefix for every request used by the http client.
     * <p>
     * For example, if this is set to "/my/path", then any client request will become <code>"/my/path/" + endpoint</code>.
     * <p>
     * In essence, every request's {@code endpoint} is prefixed by this {@code pathPrefix}. The path prefix is useful for when
     * Elasticsearch is behind a proxy that provides a base path; it is not intended for other purposes and it should not be supplied in
     * other scenarios.
     *
     * @throws NullPointerException if {@code pathPrefix} is {@code null}.
     * @throws IllegalArgumentException if {@code pathPrefix} is empty, only '/', or ends with more than one '/'.
     */
    public RestClientBuilder setPathPrefix(String pathPrefix) {
        Objects.requireNonNull(pathPrefix, "pathPrefix must not be null");
        String cleanPathPrefix = pathPrefix;

        if (cleanPathPrefix.startsWith("/") == false) {
            cleanPathPrefix = "/" + cleanPathPrefix;
        }

        // best effort to ensure that it looks like "/base/path" rather than "/base/path/"
        if (cleanPathPrefix.endsWith("/")) {
            cleanPathPrefix = cleanPathPrefix.substring(0, cleanPathPrefix.length() - 1);

            if (cleanPathPrefix.endsWith("/")) {
                throw new IllegalArgumentException("pathPrefix is malformed. too many trailing slashes: [" + pathPrefix + "]");
            }
        }

        if (cleanPathPrefix.isEmpty() || "/".equals(cleanPathPrefix)) {
            throw new IllegalArgumentException("pathPrefix must not be empty or '/': [" + pathPrefix + "]");
        }

        this.pathPrefix = cleanPathPrefix;
        return this;
    }

    /**
     * Creates a new {@link RestClient} based on the provided configuration.
     */
@ -126,7 +178,7 @@ public final class RestClientBuilder {
            failureListener = new RestClient.FailureListener();
        }
        CloseableHttpAsyncClient httpClient = createHttpClient();
        RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
        RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, pathPrefix, failureListener);
        httpClient.start();
        return restClient;
    }
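Putting the new builder option together, a minimal sketch of a client routed through a proxy that serves Elasticsearch under a base path; the proxy host and the "/es" prefix are assumptions:

    RestClient client = RestClient.builder(new HttpHost("proxy.example.com", 80))
            .setPathPrefix("/es")  // assumed base path exposed by the proxy
            .build();
    // client.performRequest("GET", "/_cluster/health") is sent to /es/_cluster/health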
@ -19,7 +19,6 @@

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
@ -28,8 +27,10 @@ import org.apache.http.message.BasicHeader;

import java.io.IOException;

import static org.hamcrest.Matchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;

public class RestClientBuilderTests extends RestClientTestCase {
@ -38,8 +39,8 @@ public class RestClientBuilderTests extends RestClientTestCase {
        try {
            RestClient.builder((HttpHost[])null);
            fail("should have failed");
        } catch(IllegalArgumentException e) {
            assertEquals("no hosts provided", e.getMessage());
        } catch(NullPointerException e) {
            assertEquals("hosts must not be null", e.getMessage());
        }

        try {
@ -62,7 +63,7 @@ public class RestClientBuilderTests extends RestClientTestCase {

        try {
            RestClient.builder(new HttpHost("localhost", 9200))
                    .setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
                    .setMaxRetryTimeoutMillis(randomIntBetween(Integer.MIN_VALUE, 0));
            fail("should have failed");
        } catch(IllegalArgumentException e) {
            assertEquals("maxRetryTimeoutMillis must be greater than 0", e.getMessage());
@ -103,13 +104,13 @@ public class RestClientBuilderTests extends RestClientTestCase {
            assertEquals("requestConfigCallback must not be null", e.getMessage());
        }

        int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
        int numNodes = randomIntBetween(1, 5);
        HttpHost[] hosts = new HttpHost[numNodes];
        for (int i = 0; i < numNodes; i++) {
            hosts[i] = new HttpHost("localhost", 9200 + i);
        }
        RestClientBuilder builder = RestClient.builder(hosts);
        if (getRandom().nextBoolean()) {
        if (randomBoolean()) {
            builder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
                @Override
                public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
@ -117,7 +118,7 @@ public class RestClientBuilderTests extends RestClientTestCase {
                }
            });
        }
        if (getRandom().nextBoolean()) {
        if (randomBoolean()) {
            builder.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
                @Override
                public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
@ -125,19 +126,55 @@ public class RestClientBuilderTests extends RestClientTestCase {
                }
            });
        }
        if (getRandom().nextBoolean()) {
            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
        if (randomBoolean()) {
            int numHeaders = randomIntBetween(1, 5);
            Header[] headers = new Header[numHeaders];
            for (int i = 0; i < numHeaders; i++) {
                headers[i] = new BasicHeader("header" + i, "value");
            }
            builder.setDefaultHeaders(headers);
        }
        if (getRandom().nextBoolean()) {
            builder.setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
        if (randomBoolean()) {
            builder.setMaxRetryTimeoutMillis(randomIntBetween(1, Integer.MAX_VALUE));
        }
        if (randomBoolean()) {
            String pathPrefix = (randomBoolean() ? "/" : "") + randomAsciiOfLengthBetween(2, 5);
            while (pathPrefix.length() < 20 && randomBoolean()) {
                pathPrefix += "/" + randomAsciiOfLengthBetween(3, 6);
            }
            builder.setPathPrefix(pathPrefix + (randomBoolean() ? "/" : ""));
        }
        try (RestClient restClient = builder.build()) {
            assertNotNull(restClient);
        }
    }

    public void testSetPathPrefixNull() {
        try {
            RestClient.builder(new HttpHost("localhost", 9200)).setPathPrefix(null);
            fail("pathPrefix set to null should fail!");
        } catch (final NullPointerException e) {
            assertEquals("pathPrefix must not be null", e.getMessage());
        }
    }

    public void testSetPathPrefixEmpty() {
        assertSetPathPrefixThrows("/");
        assertSetPathPrefixThrows("");
    }

    public void testSetPathPrefixMalformed() {
        assertSetPathPrefixThrows("//");
        assertSetPathPrefixThrows("base/path//");
    }

    private static void assertSetPathPrefixThrows(final String pathPrefix) {
        try {
            RestClient.builder(new HttpHost("localhost", 9200)).setPathPrefix(pathPrefix);
            fail("path prefix [" + pathPrefix + "] should have failed");
        } catch (final IllegalArgumentException e) {
            assertThat(e.getMessage(), containsString(pathPrefix));
        }
    }

}
@ -19,18 +19,15 @@

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import com.sun.net.httpserver.Headers;
import com.sun.net.httpserver.HttpContext;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.http.Consts;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.apache.http.util.EntityUtils;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.junit.AfterClass;
@ -60,6 +57,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
@ -81,13 +79,8 @@ public class RestClientIntegTests extends RestClientTestCase {
        for (int statusCode : getAllStatusCodes()) {
            createStatusCodeContext(httpServer, statusCode);
        }
        int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
        defaultHeaders = new Header[numHeaders];
        for (int i = 0; i < numHeaders; i++) {
            String headerName = "Header-default" + (getRandom().nextBoolean() ? i : "");
            String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
            defaultHeaders[i] = new BasicHeader(headerName, headerValue);
        }
        int numHeaders = randomIntBetween(0, 5);
        defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders);
        restClient = RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()))
                .setDefaultHeaders(defaultHeaders).build();
    }
@ -146,44 +139,43 @@ public class RestClientIntegTests extends RestClientTestCase {
     */
    public void testHeaders() throws IOException {
        for (String method : getHttpMethods()) {
            Set<String> standardHeaders = new HashSet<>(
                    Arrays.asList("Connection", "Host", "User-agent", "Date"));
            final Set<String> standardHeaders = new HashSet<>(Arrays.asList("Connection", "Host", "User-agent", "Date"));
            if (method.equals("HEAD") == false) {
                standardHeaders.add("Content-length");
            }
            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
            Map<String, String> expectedHeaders = new HashMap<>();
            for (Header defaultHeader : defaultHeaders) {
                expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue());
            }
            Header[] headers = new Header[numHeaders];
            for (int i = 0; i < numHeaders; i++) {
                String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
                String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
                headers[i] = new BasicHeader(headerName, headerValue);
                expectedHeaders.put(headerName, headerValue);
            }

            int statusCode = randomStatusCode(getRandom());
            final int numHeaders = randomIntBetween(1, 5);
            final Header[] headers = generateHeaders("Header", "Header-array", numHeaders);
            final Map<String, List<String>> expectedHeaders = new HashMap<>();

            addHeaders(expectedHeaders, defaultHeaders, headers);

            final int statusCode = randomStatusCode(getRandom());
            Response esResponse;
            try {
                esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(),
                        (HttpEntity)null, headers);
                esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), headers);
            } catch(ResponseException e) {
                esResponse = e.getResponse();
            }
            assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
            for (Header responseHeader : esResponse.getHeaders()) {
                if (responseHeader.getName().startsWith("Header")) {
                    String headerValue = expectedHeaders.remove(responseHeader.getName());
                    assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue);
            for (final Header responseHeader : esResponse.getHeaders()) {
                final String name = responseHeader.getName();
                final String value = responseHeader.getValue();
                if (name.startsWith("Header")) {
                    final List<String> values = expectedHeaders.get(name);
                    assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values);
                    assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value));

                    // we've collected them all
                    if (values.isEmpty()) {
                        expectedHeaders.remove(name);
                    }
                } else {
                    assertTrue("unknown header was returned " + responseHeader.getName(),
                            standardHeaders.remove(responseHeader.getName()));
                    assertTrue("unknown header was returned " + name, standardHeaders.remove(name));
                }
            }
            assertEquals("some headers that were sent weren't returned: " + expectedHeaders, 0, expectedHeaders.size());
            assertEquals("some expected standard headers weren't returned: " + standardHeaders, 0, standardHeaders.size());
            assertTrue("some headers that were sent weren't returned: " + expectedHeaders, expectedHeaders.isEmpty());
            assertTrue("some expected standard headers weren't returned: " + standardHeaders, standardHeaders.isEmpty());
        }
    }

@ -205,6 +197,38 @@ public class RestClientIntegTests extends RestClientTestCase {
        bodyTest("GET");
    }

    /**
     * Ensure that pathPrefix works as expected.
     */
    public void testPathPrefix() throws IOException {
        // guarantee no other test setup collides with this one and lets it sneak through
        final String uniqueContextSuffix = "/testPathPrefix";
        final String pathPrefix = "base/" + randomAsciiOfLengthBetween(1, 5) + "/";
        final int statusCode = randomStatusCode(getRandom());

        final HttpContext context =
                httpServer.createContext("/" + pathPrefix + statusCode + uniqueContextSuffix, new ResponseHandler(statusCode));

        try (final RestClient client =
                RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()))
                        .setPathPrefix((randomBoolean() ? "/" : "") + pathPrefix).build()) {

            for (final String method : getHttpMethods()) {
                Response esResponse;
                try {
                    esResponse = client.performRequest(method, "/" + statusCode + uniqueContextSuffix);
                } catch(ResponseException e) {
                    esResponse = e.getResponse();
                }

                assertThat(esResponse.getRequestLine().getUri(), equalTo("/" + pathPrefix + statusCode + uniqueContextSuffix));
                assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
            }
        } finally {
            httpServer.removeContext(context);
        }
    }

    private void bodyTest(String method) throws IOException {
        String requestBody = "{ \"field\": \"value\" }";
        StringEntity entity = new StringEntity(requestBody);
@ -226,7 +250,7 @@ public class RestClientIntegTests extends RestClientTestCase {
        for (int i = 0; i < numRequests; i++) {
            final String method = RestClientTestUtil.randomHttpMethod(getRandom());
            final int statusCode = randomStatusCode(getRandom());
            restClient.performRequest(method, "/" + statusCode, new ResponseListener() {
            restClient.performRequestAsync(method, "/" + statusCode, new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    responses.add(new TestResponse(method, statusCode, response));
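For reference, a condensed sketch of the renamed asynchronous entry point exercised by the test above; the CountDownLatch and the onFailure callback shape are assumptions made only so the example blocks until the listener fires:

    final CountDownLatch latch = new CountDownLatch(1);
    restClient.performRequestAsync("GET", "/", new ResponseListener() {
        @Override
        public void onSuccess(Response response) {
            latch.countDown();
        }

        @Override
        public void onFailure(Exception exception) {
            latch.countDown();
        }
    });
    latch.await();  // wait for the listener to be notified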
@ -101,7 +101,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
            httpHosts[i] = new HttpHost("localhost", 9200 + i);
        }
        failureListener = new HostsTrackingFailureListener();
        restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, failureListener);
        restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, null, failureListener);
    }

    public void testRoundRobinOkStatusCodes() throws IOException {
@ -19,8 +19,6 @@

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
@ -41,7 +39,6 @@ import org.apache.http.concurrent.FutureCallback;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.message.BasicHeader;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
@ -58,7 +55,10 @@ import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Future;

import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes;
@ -132,16 +132,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        });


        int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
        defaultHeaders = new Header[numHeaders];
        for (int i = 0; i < numHeaders; i++) {
            String headerName = "Header-default" + (getRandom().nextBoolean() ? i : "");
            String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
            defaultHeaders[i] = new BasicHeader(headerName, headerValue);
        }
        int numHeaders = randomIntBetween(0, 3);
        defaultHeaders = generateHeaders("Header-default", "Header-array", numHeaders);
        httpHost = new HttpHost("localhost", 9200);
        failureListener = new HostsTrackingFailureListener();
        restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, failureListener);
        restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener);
    }

    /**
@ -333,20 +328,13 @@ public class RestClientSingleHostTests extends RestClientTestCase {
     */
    public void testHeaders() throws IOException {
        for (String method : getHttpMethods()) {
            Map<String, String> expectedHeaders = new HashMap<>();
            for (Header defaultHeader : defaultHeaders) {
                expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue());
            }
            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
            Header[] headers = new Header[numHeaders];
            for (int i = 0; i < numHeaders; i++) {
                String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
                String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
                headers[i] = new BasicHeader(headerName, headerValue);
                expectedHeaders.put(headerName, headerValue);
            }
            final int numHeaders = randomIntBetween(1, 5);
            final Header[] headers = generateHeaders("Header", null, numHeaders);
            final Map<String, List<String>> expectedHeaders = new HashMap<>();

            int statusCode = randomStatusCode(getRandom());
            addHeaders(expectedHeaders, defaultHeaders, headers);

            final int statusCode = randomStatusCode(getRandom());
            Response esResponse;
            try {
                esResponse = restClient.performRequest(method, "/" + statusCode, headers);
@ -355,10 +343,18 @@ public class RestClientSingleHostTests extends RestClientTestCase {
            }
            assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
            for (Header responseHeader : esResponse.getHeaders()) {
                String headerValue = expectedHeaders.remove(responseHeader.getName());
                assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue);
                final String name = responseHeader.getName();
                final String value = responseHeader.getValue();
                final List<String> values = expectedHeaders.get(name);
                assertNotNull("found response header [" + name + "] that wasn't originally sent: " + value, values);
                assertTrue("found incorrect response header [" + name + "]: " + value, values.remove(value));

                // we've collected them all
                if (values.isEmpty()) {
                    expectedHeaders.remove(name);
                }
            }
            assertEquals("some headers that were sent weren't returned " + expectedHeaders, 0, expectedHeaders.size());
            assertTrue("some headers that were sent weren't returned " + expectedHeaders, expectedHeaders.isEmpty());
        }
    }

@ -368,11 +364,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        Map<String, String> params = Collections.emptyMap();
        boolean hasParams = randomBoolean();
        if (hasParams) {
            int numParams = RandomInts.randomIntBetween(getRandom(), 1, 3);
            int numParams = randomIntBetween(1, 3);
            params = new HashMap<>(numParams);
            for (int i = 0; i < numParams; i++) {
                String paramKey = "param-" + i;
                String paramValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
                String paramValue = randomAsciiOfLengthBetween(3, 10);
                params.put(paramKey, paramValue);
                uriBuilder.addParameter(paramKey, paramValue);
            }
@ -412,24 +408,24 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        HttpEntity entity = null;
        boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
        if (hasBody) {
            entity = new StringEntity(RandomStrings.randomAsciiOfLengthBetween(getRandom(), 10, 100));
            entity = new StringEntity(randomAsciiOfLengthBetween(10, 100));
            ((HttpEntityEnclosingRequest) request).setEntity(entity);
        }

        Header[] headers = new Header[0];
        for (Header defaultHeader : defaultHeaders) {
            //default headers are expected but not sent for each request
            request.setHeader(defaultHeader);
        final int numHeaders = randomIntBetween(1, 5);
        final Set<String> uniqueNames = new HashSet<>(numHeaders);
        if (randomBoolean()) {
            headers = generateHeaders("Header", "Header-array", numHeaders);
            for (Header header : headers) {
                request.addHeader(header);
                uniqueNames.add(header.getName());
            }
        }
        if (getRandom().nextBoolean()) {
            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
            headers = new Header[numHeaders];
            for (int i = 0; i < numHeaders; i++) {
                String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
                String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
                BasicHeader basicHeader = new BasicHeader(headerName, headerValue);
                headers[i] = basicHeader;
                request.setHeader(basicHeader);
        for (Header defaultHeader : defaultHeaders) {
            // request level headers override default headers
            if (uniqueNames.contains(defaultHeader.getName()) == false) {
                request.addHeader(defaultHeader);
            }
        }

@ -459,4 +455,5 @@ public class RestClientSingleHostTests extends RestClientTestCase {
            throw new UnsupportedOperationException();
        }
    }

}
@ -42,7 +42,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 * {@link RestClientBuilder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation needs to be lazily set to the
 * previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}.
 */
public final class Sniffer implements Closeable {
public class Sniffer implements Closeable {

    private static final Log logger = LogFactory.getLog(Sniffer.class);

@ -26,10 +26,8 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7

install.enabled = false
uploadArchives.enabled = false

dependencies {
  compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
  compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
  compile "junit:junit:${versions.junit}"
  compile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
@ -60,4 +58,4 @@ namingConventions.enabled = false

//we aren't releasing this jar
thirdPartyAudit.enabled = false
test.enabled = false
test.enabled = false
@ -31,6 +31,15 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;

import org.apache.http.Header;
import org.apache.http.message.BasicHeader;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

@TestMethodProviders({
        JUnit3MethodProvider.class
})
@ -43,4 +52,71 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
@TimeoutSuite(millis = 2 * 60 * 60 * 1000)
public abstract class RestClientTestCase extends RandomizedTest {

    /**
     * Create the specified number of {@link Header}s.
     * <p>
     * Generated header names will be the {@code baseName} plus its index or, rarely, the {@code arrayName} if it's supplied.
     *
     * @param baseName The base name to use for all headers.
     * @param arrayName The optional ({@code null}able) array name to use randomly.
     * @param headers The number of headers to create.
     * @return Never {@code null}.
     */
    protected static Header[] generateHeaders(final String baseName, final String arrayName, final int headers) {
        final Header[] generated = new Header[headers];
        for (int i = 0; i < headers; i++) {
            String headerName = baseName + i;
            if (arrayName != null && rarely()) {
                headerName = arrayName;
            }

            generated[i] = new BasicHeader(headerName, randomAsciiOfLengthBetween(3, 10));
        }
        return generated;
    }

    /**
     * Create a new {@link List} within the {@code map} if none exists for {@code name} or append to the existing list.
     *
     * @param map The map to manipulate.
     * @param name The name to create/append the list for.
     * @param value The value to add.
     */
    private static void createOrAppendList(final Map<String, List<String>> map, final String name, final String value) {
        List<String> values = map.get(name);

        if (values == null) {
            values = new ArrayList<>();
            map.put(name, values);
        }

        values.add(value);
    }

    /**
     * Add the {@code headers} to the {@code map} so that related tests can more easily assert that they exist.
     * <p>
     * If both the {@code defaultHeaders} and {@code headers} contain the same {@link Header}, based on its
     * {@linkplain Header#getName() name}, then this will only use the {@code Header}(s) from {@code headers}.
     *
     * @param map The map to build with name/value(s) pairs.
     * @param defaultHeaders The headers to add to the map representing default headers.
     * @param headers The headers to add to the map representing request-level headers.
     * @see #createOrAppendList(Map, String, String)
     */
    protected static void addHeaders(final Map<String, List<String>> map, final Header[] defaultHeaders, final Header[] headers) {
        final Set<String> uniqueHeaders = new HashSet<>();
        for (final Header header : headers) {
            final String name = header.getName();
            createOrAppendList(map, name, header.getValue());
            uniqueHeaders.add(name);
        }
        for (final Header defaultHeader : defaultHeaders) {
            final String name = defaultHeader.getName();
            if (uniqueHeaders.contains(name) == false) {
                createOrAppendList(map, name, defaultHeader.getValue());
            }
        }
    }

}
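A sketch of how the two helpers above combine in the header tests elsewhere in this commit (the values are random, so only the shape of the result matters):

    final Header[] defaults = generateHeaders("Header-default", "Header-array", randomIntBetween(0, 3));
    final Header[] requestHeaders = generateHeaders("Header", null, randomIntBetween(1, 5));
    final Map<String, List<String>> expected = new HashMap<>();
    addHeaders(expected, defaults, requestHeaders);
    // "expected" now maps each header name to the list of values a response should echo back,
    // with request-level names shadowing same-named defaults.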
@ -20,6 +20,7 @@
package org.elasticsearch.transport.client;

import com.carrotsearch.randomizedtesting.RandomizedTest;
import org.apache.lucene.util.Constants;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
@ -40,6 +41,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest {

    @Test
    public void testPluginInstalled() {
        // TODO: remove when Netty 4.1.5 is upgraded to Netty 4.1.6 including https://github.com/netty/netty/pull/5778
        assumeFalse(Constants.JRE_IS_MINIMUM_JAVA9);
        try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
            Settings settings = client.settings();
            assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
@ -49,9 +52,7 @@ public class PreBuiltTransportClientTests extends RandomizedTest {

    @Test
    public void testInstallPluginTwice() {

        for (Class<? extends Plugin> plugin : Arrays.asList(ReindexPlugin.class, PercolatorPlugin.class,
                MustachePlugin.class)) {
        for (Class<? extends Plugin> plugin : Arrays.asList(ReindexPlugin.class, PercolatorPlugin.class, MustachePlugin.class)) {
            try {
                new PreBuiltTransportClient(Settings.EMPTY, plugin);
                fail("exception expected");
@ -22,7 +22,6 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin

apply plugin: 'elasticsearch.build'
apply plugin: 'com.bmuschko.nexus'
apply plugin: 'nebula.optional-base'
apply plugin: 'nebula.maven-base-publish'
apply plugin: 'nebula.maven-scm'
@ -85,8 +84,10 @@ dependencies {
  compile "com.vividsolutions:jts:${versions.jts}", optional

  // logging
  compile "log4j:log4j:${versions.log4j}", optional
  compile "log4j:apache-log4j-extras:${versions.log4j}", optional
  compile "org.apache.logging.log4j:log4j-api:${versions.log4j}", optional
  compile "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional
  // to bridge dependencies that are still on Log4j 1 to Log4j 2
  compile "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}", optional

  compile "net.java.dev.jna:jna:${versions.jna}"

@ -154,32 +155,94 @@ thirdPartyAudit.excludes = [
  // classes are missing!

  // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
  'com.fasterxml.jackson.databind.ObjectMapper',

  // from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
  'javax.jms.Message',
  'javax.jms.MessageListener',
  'javax.jms.ObjectMessage',
  'javax.jms.TopicConnection',
  'javax.jms.TopicConnectionFactory',
  'javax.jms.TopicPublisher',
  'javax.jms.TopicSession',
  'javax.jms.TopicSubscriber',
  // from log4j
  'com.fasterxml.jackson.annotation.JsonInclude$Include',
  'com.fasterxml.jackson.databind.DeserializationContext',
  'com.fasterxml.jackson.databind.JsonMappingException',
  'com.fasterxml.jackson.databind.JsonNode',
  'com.fasterxml.jackson.databind.Module$SetupContext',
  'com.fasterxml.jackson.databind.ObjectReader',
  'com.fasterxml.jackson.databind.ObjectWriter',
  'com.fasterxml.jackson.databind.SerializerProvider',
  'com.fasterxml.jackson.databind.deser.std.StdDeserializer',
  'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer',
  'com.fasterxml.jackson.databind.module.SimpleModule',
  'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter',
  'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider',
  'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer',
  'com.fasterxml.jackson.databind.ser.std.StdSerializer',
  'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule',
  'com.fasterxml.jackson.dataformat.xml.XmlMapper',
  'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter',
  'com.lmax.disruptor.BlockingWaitStrategy',
  'com.lmax.disruptor.BusySpinWaitStrategy',
  'com.lmax.disruptor.EventFactory',
  'com.lmax.disruptor.EventTranslator',
  'com.lmax.disruptor.EventTranslatorTwoArg',
  'com.lmax.disruptor.EventTranslatorVararg',
  'com.lmax.disruptor.ExceptionHandler',
  'com.lmax.disruptor.LifecycleAware',
  'com.lmax.disruptor.RingBuffer',
  'com.lmax.disruptor.Sequence',
  'com.lmax.disruptor.SequenceReportingEventHandler',
  'com.lmax.disruptor.SleepingWaitStrategy',
  'com.lmax.disruptor.TimeoutBlockingWaitStrategy',
  'com.lmax.disruptor.WaitStrategy',
  'com.lmax.disruptor.YieldingWaitStrategy',
  'com.lmax.disruptor.dsl.Disruptor',
  'com.lmax.disruptor.dsl.ProducerType',
  'javax.jms.Connection',
  'javax.jms.ConnectionFactory',
  'javax.jms.Destination',
  'javax.jms.Message',
  'javax.jms.MessageConsumer',
  'javax.jms.MessageListener',
  'javax.jms.MessageProducer',
  'javax.jms.ObjectMessage',
  'javax.jms.Session',
  'javax.mail.Authenticator',
  'javax.mail.Message$RecipientType',
  'javax.mail.PasswordAuthentication',
  'javax.mail.Session',
  'javax.mail.Transport',
  'javax.mail.internet.InternetAddress',
  'javax.mail.internet.InternetHeaders',
  'javax.mail.internet.MimeBodyPart',
  'javax.mail.internet.MimeMessage',
  'javax.mail.internet.MimeMultipart',
  'javax.mail.internet.MimeUtility',
  'javax.mail.util.ByteArrayDataSource',
  'javax.persistence.AttributeConverter',
  'javax.persistence.EntityManager',
  'javax.persistence.EntityManagerFactory',
  'javax.persistence.EntityTransaction',
  'javax.persistence.Persistence',
  'javax.persistence.PersistenceException',
  'org.apache.commons.compress.compressors.CompressorStreamFactory',
  'org.apache.commons.compress.utils.IOUtils',
  'org.apache.commons.csv.CSVFormat',
  'org.apache.commons.csv.QuoteMode',
  'org.apache.kafka.clients.producer.KafkaProducer',
  'org.apache.kafka.clients.producer.Producer',
  'org.apache.kafka.clients.producer.ProducerRecord',
  'org.codehaus.stax2.XMLStreamWriter2',
  'org.osgi.framework.AdaptPermission',
  'org.osgi.framework.AdminPermission',
  'org.osgi.framework.Bundle',
  'org.osgi.framework.BundleActivator',
  'org.osgi.framework.BundleContext',
  'org.osgi.framework.BundleEvent',
  'org.osgi.framework.BundleReference',
  'org.osgi.framework.FrameworkUtil',
  'org.osgi.framework.SynchronousBundleListener',
  'org.osgi.framework.wiring.BundleWire',
  'org.osgi.framework.wiring.BundleWiring',
  'org.zeromq.ZMQ$Context',
  'org.zeromq.ZMQ$Socket',
  'org.zeromq.ZMQ',

  // from org.apache.log4j.net.SMTPAppender (log4j)
  'javax.mail.Authenticator',
  'javax.mail.Message$RecipientType',
  'javax.mail.Message',
  'javax.mail.Multipart',
  'javax.mail.PasswordAuthentication',
  'javax.mail.Session',
  'javax.mail.Transport',
  'javax.mail.internet.InternetAddress',
  'javax.mail.internet.InternetHeaders',
  'javax.mail.internet.MimeBodyPart',
  'javax.mail.internet.MimeMessage',
  'javax.mail.internet.MimeMultipart',
  'javax.mail.internet.MimeUtility',
  // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
  'org.noggit.JSONParser',
]

@ -0,0 +1,665 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache license, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the license for the specific language governing permissions and
 * limitations under the license.
 */

package org.apache.logging.log4j.core.impl;

import java.io.Serializable;
import java.net.URL;
import java.security.CodeSource;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Stack;

import org.apache.logging.log4j.core.util.Loader;
import org.apache.logging.log4j.status.StatusLogger;
import org.apache.logging.log4j.util.ReflectionUtil;
import org.apache.logging.log4j.util.Strings;

/**
 * Wraps a Throwable to add packaging information about each stack trace element.
 *
 * <p>
 * A proxy is used to represent a throwable that may not exist in a different class loader or JVM. When an application
 * deserializes a ThrowableProxy, the throwable may not be set, but the throwable's information is preserved in other
 * fields of the proxy like the message and stack trace.
 * </p>
 *
 * <p>
 * TODO: Move this class to org.apache.logging.log4j.core because it is used from LogEvent.
 * </p>
 * <p>
 * TODO: Deserialize: Try to rebuild Throwable if the target exception is in this class loader?
 * </p>
 */
public class ThrowableProxy implements Serializable {

    private static final String CAUSED_BY_LABEL = "Caused by: ";
    private static final String SUPPRESSED_LABEL = "Suppressed: ";
    private static final String WRAPPED_BY_LABEL = "Wrapped by: ";

    /**
     * Cached StackTracePackageElement and ClassLoader.
     * <p>
     * Consider this class private.
     * </p>
     */
    static class CacheEntry {
        private final ExtendedClassInfo element;
        private final ClassLoader loader;

        public CacheEntry(final ExtendedClassInfo element, final ClassLoader loader) {
            this.element = element;
            this.loader = loader;
        }
    }

    private static final ThrowableProxy[] EMPTY_THROWABLE_PROXY_ARRAY = new ThrowableProxy[0];

    private static final char EOL = '\n';

    private static final long serialVersionUID = -2752771578252251910L;

    private final ThrowableProxy causeProxy;

    private int commonElementCount;

    private final ExtendedStackTraceElement[] extendedStackTrace;

    private final String localizedMessage;

    private final String message;

    private final String name;

    private final ThrowableProxy[] suppressedProxies;

    private final transient Throwable throwable;

    /**
     * For JSON and XML IO via Jackson.
     */
    @SuppressWarnings("unused")
    private ThrowableProxy() {
        this.throwable = null;
        this.name = null;
        this.extendedStackTrace = null;
        this.causeProxy = null;
        this.message = null;
        this.localizedMessage = null;
        this.suppressedProxies = EMPTY_THROWABLE_PROXY_ARRAY;
    }

    /**
     * Constructs the wrapper for the Throwable that includes packaging data.
     *
     * @param throwable
     *        The Throwable to wrap, must not be null.
     */
    public ThrowableProxy(final Throwable throwable) {
        this(throwable, null);
    }

    /**
     * Constructs the wrapper for the Throwable that includes packaging data.
     *
     * @param throwable
     *        The Throwable to wrap, must not be null.
     * @param visited
     *        The set of visited suppressed exceptions.
     */
    private ThrowableProxy(final Throwable throwable, final Set<Throwable> visited) {
        this.throwable = throwable;
        this.name = throwable.getClass().getName();
        this.message = throwable.getMessage();
        this.localizedMessage = throwable.getLocalizedMessage();
        final Map<String, CacheEntry> map = new HashMap<>();
        final Stack<Class<?>> stack = ReflectionUtil.getCurrentStackTrace();
        this.extendedStackTrace = this.toExtendedStackTrace(stack, map, null, throwable.getStackTrace());
        final Throwable throwableCause = throwable.getCause();
        final Set<Throwable> causeVisited = new HashSet<>(1);
        this.causeProxy = throwableCause == null ? null : new ThrowableProxy(throwable, stack, map, throwableCause, visited, causeVisited);
        this.suppressedProxies = this.toSuppressedProxies(throwable, visited);
    }

    /**
     * Constructs the wrapper for a Throwable that is referenced as the cause by another Throwable.
     *
     * @param parent
     *        The Throwable referencing this Throwable.
     * @param stack
     *        The Class stack.
     * @param map
     *        The cache containing the packaging data.
     * @param cause
     *        The Throwable to wrap.
     * @param suppressedVisited TODO
     * @param causeVisited TODO
     */
    private ThrowableProxy(final Throwable parent, final Stack<Class<?>> stack, final Map<String, CacheEntry> map,
            final Throwable cause, final Set<Throwable> suppressedVisited, final Set<Throwable> causeVisited) {
        causeVisited.add(cause);
        this.throwable = cause;
        this.name = cause.getClass().getName();
        this.message = this.throwable.getMessage();
        this.localizedMessage = this.throwable.getLocalizedMessage();
        this.extendedStackTrace = this.toExtendedStackTrace(stack, map, parent.getStackTrace(), cause.getStackTrace());
        final Throwable causeCause = cause.getCause();
        this.causeProxy = causeCause == null || causeVisited.contains(causeCause) ? null : new ThrowableProxy(parent,
                stack, map, causeCause, suppressedVisited, causeVisited);
        this.suppressedProxies = this.toSuppressedProxies(cause, suppressedVisited);
    }

    @Override
    public boolean equals(final Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null) {
            return false;
        }
        if (this.getClass() != obj.getClass()) {
            return false;
        }
        final ThrowableProxy other = (ThrowableProxy) obj;
        if (this.causeProxy == null) {
            if (other.causeProxy != null) {
                return false;
            }
        } else if (!this.causeProxy.equals(other.causeProxy)) {
            return false;
        }
        if (this.commonElementCount != other.commonElementCount) {
            return false;
        }
        if (this.name == null) {
            if (other.name != null) {
                return false;
            }
        } else if (!this.name.equals(other.name)) {
            return false;
        }
        if (!Arrays.equals(this.extendedStackTrace, other.extendedStackTrace)) {
            return false;
        }
        if (!Arrays.equals(this.suppressedProxies, other.suppressedProxies)) {
            return false;
        }
        return true;
    }

    private void formatCause(final StringBuilder sb, final String prefix, final ThrowableProxy cause, final List<String> ignorePackages) {
        formatThrowableProxy(sb, prefix, CAUSED_BY_LABEL, cause, ignorePackages);
    }

    private void formatThrowableProxy(final StringBuilder sb, final String prefix, final String causeLabel,
            final ThrowableProxy throwableProxy, final List<String> ignorePackages) {
        if (throwableProxy == null) {
            return;
        }
        sb.append(prefix).append(causeLabel).append(throwableProxy).append(EOL);
        this.formatElements(sb, prefix, throwableProxy.commonElementCount,
                throwableProxy.getStackTrace(), throwableProxy.extendedStackTrace, ignorePackages);
        this.formatSuppressed(sb, prefix + "\t", throwableProxy.suppressedProxies, ignorePackages);
        this.formatCause(sb, prefix, throwableProxy.causeProxy, ignorePackages);
    }

    private void formatSuppressed(final StringBuilder sb, final String prefix, final ThrowableProxy[] suppressedProxies,
            final List<String> ignorePackages) {
        if (suppressedProxies == null) {
            return;
        }
        for (final ThrowableProxy suppressedProxy : suppressedProxies) {
            final ThrowableProxy cause = suppressedProxy;
            formatThrowableProxy(sb, prefix, SUPPRESSED_LABEL, cause, ignorePackages);
        }
    }

    private void formatElements(final StringBuilder sb, final String prefix, final int commonCount,
            final StackTraceElement[] causedTrace, final ExtendedStackTraceElement[] extStackTrace,
            final List<String> ignorePackages) {
        if (ignorePackages == null || ignorePackages.isEmpty()) {
            for (final ExtendedStackTraceElement element : extStackTrace) {
                this.formatEntry(element, sb, prefix);
            }
        } else {
            int count = 0;
            for (int i = 0; i < extStackTrace.length; ++i) {
                if (!this.ignoreElement(causedTrace[i], ignorePackages)) {
                    if (count > 0) {
                        appendSuppressedCount(sb, prefix, count);
                        count = 0;
                    }
                    this.formatEntry(extStackTrace[i], sb, prefix);
                } else {
                    ++count;
                }
            }
            if (count > 0) {
                appendSuppressedCount(sb, prefix, count);
            }
        }
        if (commonCount != 0) {
            sb.append(prefix).append("\t... ").append(commonCount).append(" more").append(EOL);
        }
    }

    private void appendSuppressedCount(final StringBuilder sb, final String prefix, final int count) {
        sb.append(prefix);
        if (count == 1) {
            sb.append("\t....").append(EOL);
        } else {
            sb.append("\t... suppressed ").append(count).append(" lines").append(EOL);
        }
    }

    private void formatEntry(final ExtendedStackTraceElement extStackTraceElement, final StringBuilder sb, final String prefix) {
        sb.append(prefix);
        sb.append("\tat ");
        sb.append(extStackTraceElement);
        sb.append(EOL);
    }

    /**
     * Formats the specified Throwable.
     *
     * @param sb
     *        StringBuilder to contain the formatted Throwable.
     * @param cause
     *        The Throwable to format.
     */
    public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause) {
        this.formatWrapper(sb, cause, null);
    }

    /**
     * Formats the specified Throwable.
     *
     * @param sb
     *        StringBuilder to contain the formatted Throwable.
     * @param cause
     *        The Throwable to format.
     * @param packages
     *        The List of packages to be suppressed from the trace.
     */
    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
    public void formatWrapper(final StringBuilder sb, final ThrowableProxy cause, final List<String> packages) {
        final Throwable caused = cause.getCauseProxy() != null ? cause.getCauseProxy().getThrowable() : null;
        if (caused != null) {
            this.formatWrapper(sb, cause.causeProxy);
            sb.append(WRAPPED_BY_LABEL);
        }
        sb.append(cause).append(EOL);
        this.formatElements(sb, "", cause.commonElementCount,
                cause.getThrowable().getStackTrace(), cause.extendedStackTrace, packages);
    }

    public ThrowableProxy getCauseProxy() {
        return this.causeProxy;
    }

    /**
     * Format the Throwable that is the cause of this Throwable.
     *
     * @return The formatted Throwable that caused this Throwable.
     */
    public String getCauseStackTraceAsString() {
        return this.getCauseStackTraceAsString(null);
    }

    /**
     * Format the Throwable that is the cause of this Throwable.
     *
     * @param packages
     *        The List of packages to be suppressed from the trace.
     * @return The formatted Throwable that caused this Throwable.
     */
    public String getCauseStackTraceAsString(final List<String> packages) {
        final StringBuilder sb = new StringBuilder();
        if (this.causeProxy != null) {
            this.formatWrapper(sb, this.causeProxy);
            sb.append(WRAPPED_BY_LABEL);
        }
        sb.append(this.toString());
        sb.append(EOL);
        this.formatElements(sb, "", 0, this.throwable.getStackTrace(), this.extendedStackTrace, packages);
        return sb.toString();
    }

    /**
     * Return the number of elements that are being omitted because they are common with the parent Throwable's stack
     * trace.
     *
     * @return The number of elements omitted from the stack trace.
     */
    public int getCommonElementCount() {
        return this.commonElementCount;
    }

    /**
     * Gets the stack trace including packaging information.
     *
     * @return The stack trace including packaging information.
     */
    public ExtendedStackTraceElement[] getExtendedStackTrace() {
        return this.extendedStackTrace;
    }

    /**
     * Format the stack trace including packaging information.
     *
     * @return The formatted stack trace including packaging information.
     */
    public String getExtendedStackTraceAsString() {
        return this.getExtendedStackTraceAsString(null);
    }

    /**
     * Format the stack trace including packaging information.
     *
     * @param ignorePackages
     *        List of packages to be ignored in the trace.
     * @return The formatted stack trace including packaging information.
     */
    public String getExtendedStackTraceAsString(final List<String> ignorePackages) {
        final StringBuilder sb = new StringBuilder(this.name);
        final String msg = this.message;
        if (msg != null) {
            sb.append(": ").append(msg);
        }
        sb.append(EOL);
        final StackTraceElement[] causedTrace = this.throwable != null ? this.throwable.getStackTrace() : null;
        this.formatElements(sb, "", 0, causedTrace, this.extendedStackTrace, ignorePackages);
        this.formatSuppressed(sb, "\t", this.suppressedProxies, ignorePackages);
        this.formatCause(sb, "", this.causeProxy, ignorePackages);
        return sb.toString();
    }

    public String getLocalizedMessage() {
        return this.localizedMessage;
    }

    public String getMessage() {
        return this.message;
    }

    /**
     * Return the FQCN of the Throwable.
     *
     * @return The FQCN of the Throwable.
     */
    public String getName() {
        return this.name;
    }

    public StackTraceElement[] getStackTrace() {
        return this.throwable == null ? null : this.throwable.getStackTrace();
    }

    /**
     * Gets proxies for suppressed exceptions.
     *
     * @return proxies for suppressed exceptions.
     */
    public ThrowableProxy[] getSuppressedProxies() {
        return this.suppressedProxies;
    }

    /**
     * Format the suppressed Throwables.
     *
     * @return The formatted suppressed Throwables.
     */
    public String getSuppressedStackTrace() {
        final ThrowableProxy[] suppressed = this.getSuppressedProxies();
        if (suppressed == null || suppressed.length == 0) {
            return Strings.EMPTY;
        }
        final StringBuilder sb = new StringBuilder("Suppressed Stack Trace Elements:").append(EOL);
        for (final ThrowableProxy proxy : suppressed) {
            sb.append(proxy.getExtendedStackTraceAsString());
        }
        return sb.toString();
    }

    /**
     * The throwable or null if this object is deserialized from XML or JSON.
     *
     * @return The throwable or null if this object is deserialized from XML or JSON.
     */
    public Throwable getThrowable() {
        return this.throwable;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + (this.causeProxy == null ? 0 : this.causeProxy.hashCode());
        result = prime * result + this.commonElementCount;
        result = prime * result + (this.extendedStackTrace == null ? 0 : Arrays.hashCode(this.extendedStackTrace));
        result = prime * result + (this.suppressedProxies == null ? 0 : Arrays.hashCode(this.suppressedProxies));
        result = prime * result + (this.name == null ? 0 : this.name.hashCode());
        return result;
    }

    private boolean ignoreElement(final StackTraceElement element, final List<String> ignorePackages) {
        final String className = element.getClassName();
        for (final String pkg : ignorePackages) {
            if (className.startsWith(pkg)) {
                return true;
            }
        }
        return false;
    }

    /**
     * Loads classes not located via Reflection.getCallerClass.
     *
     * @param lastLoader
     *        The ClassLoader that loaded the Class that called this Class.
     * @param className
     *        The name of the Class.
     * @return The Class object for the Class or null if it could not be located.
     */
    private Class<?> loadClass(final ClassLoader lastLoader, final String className) {
        // XXX: this is overly complicated
        Class<?> clazz;
        if (lastLoader != null) {
            try {
                clazz = Loader.initializeClass(className, lastLoader);
                if (clazz != null) {
                    return clazz;
                }
            } catch (final Throwable ignore) {
                // Ignore exception.
            }
        }
        try {
            clazz = Loader.loadClass(className);
        } catch (final ClassNotFoundException ignored) {
            return initializeClass(className);
        } catch (final NoClassDefFoundError ignored) {
            return initializeClass(className);
        } catch (final SecurityException ignored) {
            return initializeClass(className);
        }
        return clazz;
    }

    private Class<?> initializeClass(final String className) {
        try {
            return Loader.initializeClass(className, this.getClass().getClassLoader());
        } catch (final ClassNotFoundException ignore) {
            return null;
        } catch (final NoClassDefFoundError ignore) {
            return null;
        } catch (final SecurityException ignore) {
            return null;
        }
    }

    /**
     * Construct the CacheEntry from the Class's information.
     *
     * @param stackTraceElement
     *        The stack trace element
     * @param callerClass
     *        The Class.
     * @param exact
     *        True if the class was obtained via Reflection.getCallerClass.
     *
     * @return The CacheEntry.
     */
    private CacheEntry toCacheEntry(final StackTraceElement stackTraceElement, final Class<?> callerClass,
            final boolean exact) {
        String location = "?";
        String version = "?";
        ClassLoader lastLoader = null;
        if (callerClass != null) {
            try {
                final CodeSource source = callerClass.getProtectionDomain().getCodeSource();
                if (source != null) {
                    final URL locationURL = source.getLocation();
                    if (locationURL != null) {
                        final String str = locationURL.toString().replace('\\', '/');
                        int index = str.lastIndexOf("/");
                        if (index >= 0 && index == str.length() - 1) {
                            index = str.lastIndexOf("/", index - 1);
                            location = str.substring(index + 1);
                        } else {
                            location = str.substring(index + 1);
                        }
                    }
                }
            } catch (final Exception ex) {
                // Ignore the exception.
            }
            final Package pkg = callerClass.getPackage();
            if (pkg != null) {
                final String ver = pkg.getImplementationVersion();
                if (ver != null) {
                    version = ver;
                }
            }
            lastLoader = callerClass.getClassLoader();
        }
        return new CacheEntry(new ExtendedClassInfo(exact, location, version), lastLoader);
    }

    /**
     * Resolve all the stack entries in this stack trace that are not common with the parent.
     *
     * @param stack
     *        The callers Class stack.
     * @param map
     *        The cache of CacheEntry objects.
     * @param rootTrace
     *        The first stack trace resolve or null.
     * @param stackTrace
     *        The stack trace being resolved.
     * @return The StackTracePackageElement array.
     */
    ExtendedStackTraceElement[] toExtendedStackTrace(final Stack<Class<?>> stack, final Map<String, CacheEntry> map,
            final StackTraceElement[] rootTrace, final StackTraceElement[] stackTrace) {
        int stackLength;
        if (rootTrace != null) {
            int rootIndex = rootTrace.length - 1;
            int stackIndex = stackTrace.length - 1;
            while (rootIndex >= 0 && stackIndex >= 0 && rootTrace[rootIndex].equals(stackTrace[stackIndex])) {
                --rootIndex;
                --stackIndex;
            }
            this.commonElementCount = stackTrace.length - 1 - stackIndex;
            stackLength = stackIndex + 1;
        } else {
            this.commonElementCount = 0;
            stackLength = stackTrace.length;
        }
        final ExtendedStackTraceElement[] extStackTrace = new ExtendedStackTraceElement[stackLength];
        Class<?> clazz = stack.isEmpty() ? null : stack.peek();
        ClassLoader lastLoader = null;
        for (int i = stackLength - 1; i >= 0; --i) {
            final StackTraceElement stackTraceElement = stackTrace[i];
            final String className = stackTraceElement.getClassName();
            // The stack returned from getCurrentStack may be missing entries for java.lang.reflect.Method.invoke()
            // and its implementation. The Throwable might also contain stack entries that are no longer
            // present as those methods have returned.
            ExtendedClassInfo extClassInfo;
            if (clazz != null && className.equals(clazz.getName())) {
                final CacheEntry entry = this.toCacheEntry(stackTraceElement, clazz, true);
                extClassInfo = entry.element;
                lastLoader = entry.loader;
                stack.pop();
                clazz = stack.isEmpty() ? null : stack.peek();
            } else {
                final CacheEntry cacheEntry = map.get(className);
                if (cacheEntry != null) {
                    final CacheEntry entry = cacheEntry;
                    extClassInfo = entry.element;
                    if (entry.loader != null) {
                        lastLoader = entry.loader;
                    }
                } else {
                    final CacheEntry entry = this.toCacheEntry(stackTraceElement,
                            this.loadClass(lastLoader, className), false);
                    extClassInfo = entry.element;
                    map.put(stackTraceElement.toString(), entry);
                    if (entry.loader != null) {
                        lastLoader = entry.loader;
                    }
                }
            }
            extStackTrace[i] = new ExtendedStackTraceElement(stackTraceElement, extClassInfo);
        }
        return extStackTrace;
    }

    @Override
    public String toString() {
        final String msg = this.message;
        return msg != null ? this.name + ": " + msg : this.name;
    }

    private ThrowableProxy[] toSuppressedProxies(final Throwable thrown, Set<Throwable> suppressedVisited) {
        try {
            final Throwable[] suppressed = thrown.getSuppressed();
            if (suppressed == null) {
                return EMPTY_THROWABLE_PROXY_ARRAY;
            }
            final List<ThrowableProxy> proxies = new ArrayList<>(suppressed.length);
            if (suppressedVisited == null) {
                suppressedVisited = new HashSet<>(proxies.size());
            }
            for (int i = 0; i < suppressed.length; i++) {
                final Throwable candidate = suppressed[i];
                if (!suppressedVisited.contains(candidate)) {
                    suppressedVisited.add(candidate);
                    proxies.add(new ThrowableProxy(candidate, suppressedVisited));
                }
            }
            return proxies.toArray(new ThrowableProxy[proxies.size()]);
        } catch (final Exception e) {
            StatusLogger.getLogger().error(e);
        }
        return null;
    }
}
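
The class above is exercised roughly as follows; a minimal sketch that uses only the constructor and accessors shown in this file (the demo class itself is hypothetical and assumes log4j-core is on the classpath):

import org.apache.logging.log4j.core.impl.ThrowableProxy;

public class ThrowableProxyDemo {
    public static void main(String[] args) {
        try {
            throw new IllegalStateException("boom", new NullPointerException("cause"));
        } catch (IllegalStateException e) {
            ThrowableProxy proxy = new ThrowableProxy(e);
            // Each frame is printed with extended packaging data appended by
            // ExtendedStackTraceElement (location jar and implementation version).
            System.out.println(proxy.getExtendedStackTraceAsString());
            // Frames shared with a parent stack trace are collapsed into a
            // "... N more" line; the count is tracked per proxy.
            System.out.println("common elements: " + proxy.getCommonElementCount());
        }
    }
}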
392
core/src/main/java/org/apache/logging/log4j/core/jmx/Server.java
Normal file
@ -0,0 +1,392 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache license, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the license for the specific language governing permissions and
 * limitations under the license.
 */
package org.apache.logging.log4j.core.jmx;

import java.lang.management.ManagementFactory;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.Appender;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.appender.AsyncAppender;
import org.apache.logging.log4j.core.async.AsyncLoggerConfig;
import org.apache.logging.log4j.core.async.AsyncLoggerContext;
import org.apache.logging.log4j.core.async.DaemonThreadFactory;
import org.apache.logging.log4j.core.config.LoggerConfig;
import org.apache.logging.log4j.core.impl.Log4jContextFactory;
import org.apache.logging.log4j.core.selector.ContextSelector;
import org.apache.logging.log4j.core.util.Constants;
import org.apache.logging.log4j.spi.LoggerContextFactory;
import org.apache.logging.log4j.status.StatusLogger;
import org.apache.logging.log4j.util.PropertiesUtil;
import org.elasticsearch.common.SuppressForbidden;

/**
 * Creates MBeans to instrument various classes in the log4j class hierarchy.
 * <p>
 * All instrumentation for Log4j 2 classes can be disabled by setting system property {@code -Dlog4j2.disable.jmx=true}.
 * </p>
 */
@SuppressForbidden(reason = "copied class to hack around Log4j bug")
public final class Server {

    /**
     * The domain part, or prefix ({@value}) of the {@code ObjectName} of all MBeans that instrument Log4J2 components.
     */
    public static final String DOMAIN = "org.apache.logging.log4j2";
    private static final String PROPERTY_DISABLE_JMX = "log4j2.disable.jmx";
    private static final String PROPERTY_ASYNC_NOTIF = "log4j2.jmx.notify.async";
    private static final String THREAD_NAME_PREFIX = "log4j2.jmx.notif";
    private static final StatusLogger LOGGER = StatusLogger.getLogger();
    static final Executor executor = isJmxDisabled() ? null : createExecutor();

    private Server() {
    }

    /**
     * Returns either a {@code null} Executor (causing JMX notifications to be sent from the caller thread) or a daemon
     * background thread Executor, depending on the value of system property "log4j2.jmx.notify.async". If this
     * property is not set, use a {@code null} Executor for web apps to avoid memory leaks and other issues when the
     * web app is restarted.
     * @see <a href="https://issues.apache.org/jira/browse/LOG4J2-938">LOG4J2-938</a>
     */
    private static ExecutorService createExecutor() {
        final boolean defaultAsync = !Constants.IS_WEB_APP;
        final boolean async = PropertiesUtil.getProperties().getBooleanProperty(PROPERTY_ASYNC_NOTIF, defaultAsync);
        return async ? Executors.newFixedThreadPool(1, new DaemonThreadFactory(THREAD_NAME_PREFIX)) : null;
    }

    /**
     * Either returns the specified name as is, or returns a quoted value containing the specified name with the special
     * characters (comma, equals, colon, quote, asterisk, or question mark) preceded with a backslash.
     *
     * @param name the name to escape so it can be used as a value in an {@link ObjectName}.
     * @return the escaped name
     */
    public static String escape(final String name) {
        final StringBuilder sb = new StringBuilder(name.length() * 2);
        boolean needsQuotes = false;
        for (int i = 0; i < name.length(); i++) {
            final char c = name.charAt(i);
            switch (c) {
            case '\\':
            case '*':
            case '?':
            case '\"':
                // quote, star, question & backslash must be escaped
                sb.append('\\');
                needsQuotes = true; // ... and can only appear in quoted value
                break;
            case ',':
            case '=':
            case ':':
                // no need to escape these, but value must be quoted
                needsQuotes = true;
                break;
            case '\r':
                // drop \r characters: \\r gives "invalid escape sequence"
                continue;
            case '\n':
                // replace \n characters with \\n sequence
                sb.append("\\n");
                needsQuotes = true;
                continue;
            }
            sb.append(c);
        }
        if (needsQuotes) {
            sb.insert(0, '\"');
            sb.append('\"');
        }
        return sb.toString();
    }

    private static boolean isJmxDisabled() {
        return PropertiesUtil.getProperties().getBooleanProperty(PROPERTY_DISABLE_JMX);
    }

    public static void reregisterMBeansAfterReconfigure() {
        // avoid creating Platform MBean Server if JMX disabled
        if (isJmxDisabled()) {
            LOGGER.debug("JMX disabled for log4j2. Not registering MBeans.");
            return;
        }
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        reregisterMBeansAfterReconfigure(mbs);
    }

    public static void reregisterMBeansAfterReconfigure(final MBeanServer mbs) {
        if (isJmxDisabled()) {
            LOGGER.debug("JMX disabled for log4j2. Not registering MBeans.");
            return;
        }

        // now provide instrumentation for the newly configured
        // LoggerConfigs and Appenders
        try {
            final ContextSelector selector = getContextSelector();
            if (selector == null) {
                LOGGER.debug("Could not register MBeans: no ContextSelector found.");
                return;
            }
            LOGGER.trace("Reregistering MBeans after reconfigure. Selector={}", selector);
            final List<LoggerContext> contexts = selector.getLoggerContexts();
            int i = 0;
            for (final LoggerContext ctx : contexts) {
                LOGGER.trace("Reregistering context ({}/{}): '{}' {}", ++i, contexts.size(), ctx.getName(), ctx);
                // first unregister the context and all nested loggers,
                // appenders, statusLogger, contextSelector, ringbuffers...
                unregisterLoggerContext(ctx.getName(), mbs);

                final LoggerContextAdmin mbean = new LoggerContextAdmin(ctx, executor);
                register(mbs, mbean, mbean.getObjectName());

                if (ctx instanceof AsyncLoggerContext) {
                    final RingBufferAdmin rbmbean = ((AsyncLoggerContext) ctx).createRingBufferAdmin();
                    if (rbmbean.getBufferSize() > 0) {
                        // don't register if Disruptor not started (DefaultConfiguration: config not found)
                        register(mbs, rbmbean, rbmbean.getObjectName());
                    }
                }

                // register the status logger and the context selector
                // repeatedly
                // for each known context: if one context is unregistered,
                // these MBeans should still be available for the other
                // contexts.
                registerStatusLogger(ctx.getName(), mbs, executor);
                registerContextSelector(ctx.getName(), selector, mbs, executor);

                registerLoggerConfigs(ctx, mbs, executor);
                registerAppenders(ctx, mbs, executor);
            }
        } catch (final Exception ex) {
            LOGGER.error("Could not register mbeans", ex);
        }
    }

    /**
     * Unregister all log4j MBeans from the platform MBean server.
     */
    public static void unregisterMBeans() {
        if (isJmxDisabled()) {
            LOGGER.debug("JMX disabled for Log4j2. Not unregistering MBeans.");
            return;
        }
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        unregisterMBeans(mbs);
    }

    /**
     * Unregister all log4j MBeans from the specified MBean server.
     *
     * @param mbs the MBean server to unregister from.
     */
    public static void unregisterMBeans(final MBeanServer mbs) {
        unregisterStatusLogger("*", mbs);
        unregisterContextSelector("*", mbs);
        unregisterContexts(mbs);
        unregisterLoggerConfigs("*", mbs);
        unregisterAsyncLoggerRingBufferAdmins("*", mbs);
        unregisterAsyncLoggerConfigRingBufferAdmins("*", mbs);
        unregisterAppenders("*", mbs);
        unregisterAsyncAppenders("*", mbs);
    }

    /**
     * Returns the {@code ContextSelector} of the current {@code Log4jContextFactory}.
     *
     * @return the {@code ContextSelector} of the current {@code Log4jContextFactory}
     */
    private static ContextSelector getContextSelector() {
        final LoggerContextFactory factory = LogManager.getFactory();
        if (factory instanceof Log4jContextFactory) {
            final ContextSelector selector = ((Log4jContextFactory) factory).getSelector();
            return selector;
        }
        return null;
    }

    /**
     * Unregisters all MBeans associated with the specified logger context (including MBeans for {@code LoggerConfig}s
     * and {@code Appender}s from the platform MBean server.
     *
     * @param loggerContextName name of the logger context to unregister
     */
    public static void unregisterLoggerContext(final String loggerContextName) {
        if (isJmxDisabled()) {
            LOGGER.debug("JMX disabled for Log4j2. Not unregistering MBeans.");
            return;
        }
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        unregisterLoggerContext(loggerContextName, mbs);
    }

    /**
     * Unregisters all MBeans associated with the specified logger context (including MBeans for {@code LoggerConfig}s
     * and {@code Appender}s from the platform MBean server.
     *
     * @param contextName name of the logger context to unregister
     * @param mbs the MBean Server to unregister the instrumented objects from
     */
    public static void unregisterLoggerContext(final String contextName, final MBeanServer mbs) {
        final String pattern = LoggerContextAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs); // unregister context mbean

        // now unregister all MBeans associated with this logger context
        unregisterStatusLogger(contextName, mbs);
        unregisterContextSelector(contextName, mbs);
        unregisterLoggerConfigs(contextName, mbs);
        unregisterAppenders(contextName, mbs);
        unregisterAsyncAppenders(contextName, mbs);
        unregisterAsyncLoggerRingBufferAdmins(contextName, mbs);
        unregisterAsyncLoggerConfigRingBufferAdmins(contextName, mbs);
    }

    private static void registerStatusLogger(final String contextName, final MBeanServer mbs, final Executor executor)
            throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {

        final StatusLoggerAdmin mbean = new StatusLoggerAdmin(contextName, executor);
        register(mbs, mbean, mbean.getObjectName());
    }

    private static void registerContextSelector(final String contextName, final ContextSelector selector,
            final MBeanServer mbs, final Executor executor) throws InstanceAlreadyExistsException,
            MBeanRegistrationException, NotCompliantMBeanException {

        final ContextSelectorAdmin mbean = new ContextSelectorAdmin(contextName, selector);
        register(mbs, mbean, mbean.getObjectName());
    }

    private static void unregisterStatusLogger(final String contextName, final MBeanServer mbs) {
        final String pattern = StatusLoggerAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterContextSelector(final String contextName, final MBeanServer mbs) {
        final String pattern = ContextSelectorAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterLoggerConfigs(final String contextName, final MBeanServer mbs) {
        final String pattern = LoggerConfigAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterContexts(final MBeanServer mbs) {
        final String pattern = LoggerContextAdminMBean.PATTERN;
        final String search = String.format(pattern, "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterAppenders(final String contextName, final MBeanServer mbs) {
        final String pattern = AppenderAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterAsyncAppenders(final String contextName, final MBeanServer mbs) {
        final String pattern = AsyncAppenderAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterAsyncLoggerRingBufferAdmins(final String contextName, final MBeanServer mbs) {
        final String pattern1 = RingBufferAdminMBean.PATTERN_ASYNC_LOGGER;
        final String search1 = String.format(pattern1, escape(contextName));
        unregisterAllMatching(search1, mbs);
    }

    private static void unregisterAsyncLoggerConfigRingBufferAdmins(final String contextName, final MBeanServer mbs) {
        final String pattern2 = RingBufferAdminMBean.PATTERN_ASYNC_LOGGER_CONFIG;
        final String search2 = String.format(pattern2, escape(contextName), "*");
        unregisterAllMatching(search2, mbs);
    }

    private static void unregisterAllMatching(final String search, final MBeanServer mbs) {
        try {
            final ObjectName pattern = new ObjectName(search);
            final Set<ObjectName> found = mbs.queryNames(pattern, null);
            if (found.isEmpty()) {
                LOGGER.trace("Unregistering but no MBeans found matching '{}'", search);
            } else {
                LOGGER.trace("Unregistering {} MBeans: {}", found.size(), found);
            }
            for (final ObjectName objectName : found) {
                mbs.unregisterMBean(objectName);
            }
        } catch (final Exception ex) {
            LOGGER.error("Could not unregister MBeans for " + search, ex);
        }
    }

    private static void registerLoggerConfigs(final LoggerContext ctx, final MBeanServer mbs, final Executor executor)
            throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {

        final Map<String, LoggerConfig> map = ctx.getConfiguration().getLoggers();
        for (final String name : map.keySet()) {
            final LoggerConfig cfg = map.get(name);
            final LoggerConfigAdmin mbean = new LoggerConfigAdmin(ctx, cfg);
            register(mbs, mbean, mbean.getObjectName());

            if (cfg instanceof AsyncLoggerConfig) {
                final AsyncLoggerConfig async = (AsyncLoggerConfig) cfg;
                final RingBufferAdmin rbmbean = async.createRingBufferAdmin(ctx.getName());
                register(mbs, rbmbean, rbmbean.getObjectName());
            }
        }
    }

    private static void registerAppenders(final LoggerContext ctx, final MBeanServer mbs, final Executor executor)
            throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {

        final Map<String, Appender> map = ctx.getConfiguration().getAppenders();
        for (final String name : map.keySet()) {
            final Appender appender = map.get(name);

            if (appender instanceof AsyncAppender) {
                final AsyncAppender async = ((AsyncAppender) appender);
                final AsyncAppenderAdmin mbean = new AsyncAppenderAdmin(ctx.getName(), async);
                register(mbs, mbean, mbean.getObjectName());
            } else {
                final AppenderAdmin mbean = new AppenderAdmin(ctx.getName(), appender);
                register(mbs, mbean, mbean.getObjectName());
            }
        }
    }

    private static void register(final MBeanServer mbs, final Object mbean, final ObjectName objectName)
            throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {
        LOGGER.debug("Registering MBean {}", objectName);
        mbs.registerMBean(mbean, objectName);
    }
}
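
To make the quoting rules of escape() concrete, a small illustrative sketch; the expected outputs in the comments are derived from the switch statement above, not from the commit itself:

import org.apache.logging.log4j.core.jmx.Server;

public class EscapeDemo {
    public static void main(String[] args) {
        // No special characters: returned as is.
        System.out.println(Server.escape("mycontext"));     // mycontext
        // Comma needs no backslash, but forces the value to be quoted.
        System.out.println(Server.escape("a,b"));           // "a,b"
        // Asterisk is backslash-escaped and the value is quoted.
        System.out.println(Server.escape("ctx*"));          // "ctx\*"
        // Newlines become a literal \n sequence inside a quoted value.
        System.out.println(Server.escape("line1\nline2"));  // "line1\nline2"
    }
}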
@ -19,11 +19,11 @@

package org.apache.lucene.analysis.miscellaneous;

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.util.CharArraySet;

import java.io.IOException;

@ -102,7 +102,6 @@ public class MapperQueryParser extends QueryParser {
        setLowercaseExpandedTerms(settings.lowercaseExpandedTerms());
        setPhraseSlop(settings.phraseSlop());
        setDefaultOperator(settings.defaultOperator());
        setFuzzyMinSim(settings.fuzziness().asFloat());
        setFuzzyPrefixLength(settings.fuzzyPrefixLength());
        setLocale(settings.locale());
    }
@ -114,7 +113,7 @@ public class MapperQueryParser extends QueryParser {
    @Override
    Query handleBareFuzzy(String qfield, Token fuzzySlop, String termImage) throws ParseException {
        if (fuzzySlop.image.length() == 1) {
            return getFuzzyQuery(qfield, termImage, Float.toString(fuzzyMinSim));
            return getFuzzyQuery(qfield, termImage, Float.toString(settings.fuzziness().asDistance(termImage)));
        }
        return getFuzzyQuery(qfield, termImage, fuzzySlop.image.substring(1));
    }

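The new handleBareFuzzy call converts the configured fuzziness into a per-term edit distance instead of the fixed fuzzyMinSim float. A rough sketch of AUTO-style behavior, assuming the usual length thresholds (an assumption for illustration, not taken from this commit):

public final class AutoFuzziness {
    private AutoFuzziness() {}

    // Assumed AUTO thresholds: terms of 0-2 chars allow 0 edits,
    // 3-5 chars allow 1 edit, and longer terms allow 2 edits.
    static int asDistance(String term) {
        final int len = term.codePointCount(0, term.length());
        if (len <= 2) {
            return 0;
        }
        return len <= 5 ? 1 : 2;
    }

    public static void main(String[] args) {
        System.out.println(asDistance("ab"));      // 0
        System.out.println(asDistance("quick"));   // 1
        System.out.println(asDistance("elastic")); // 2
    }
}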
@ -68,14 +68,14 @@ public class StoreRateLimiting {
    }

    public void setMaxRate(ByteSizeValue rate) {
        if (rate.bytes() <= 0) {
        if (rate.getBytes() <= 0) {
            actualRateLimiter = null;
        } else if (actualRateLimiter == null) {
            actualRateLimiter = rateLimiter;
            actualRateLimiter.setMBPerSec(rate.mbFrac());
            actualRateLimiter.setMBPerSec(rate.getMbFrac());
        } else {
            assert rateLimiter == actualRateLimiter;
            rateLimiter.setMBPerSec(rate.mbFrac());
            rateLimiter.setMBPerSec(rate.getMbFrac());
        }
    }

@ -19,6 +19,7 @@

package org.elasticsearch;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.common.io.stream.StreamInput;
@ -101,7 +102,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
    public ElasticsearchException(StreamInput in) throws IOException {
        super(in.readOptionalString(), in.readException());
        readStackTrace(this, in);
        headers.putAll(in.readMapOfLists());
        headers.putAll(in.readMapOfLists(StreamInput::readString, StreamInput::readString));
    }

    /**
@ -196,7 +197,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
        out.writeOptionalString(this.getMessage());
        out.writeException(this.getCause());
        writeStackTraces(this, out);
        out.writeMapOfLists(headers);
        out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
    }

    public static ElasticsearchException readException(StreamInput input, int id) throws IOException {

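readMapOfLists and writeMapOfLists now take explicit reader and writer functions for keys and values. A self-contained sketch of that pattern, with plain DataInput/DataOutput standing in for Elasticsearch's StreamInput/StreamOutput (the codec class and wire layout here are illustrative only):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class MapOfListsCodec {
    interface Writer<T> { void write(DataOutput out, T value) throws IOException; }
    interface Reader<T> { T read(DataInput in) throws IOException; }

    // Writes the map size, then for each entry the key, the list size, and the values.
    static <K, V> void writeMapOfLists(DataOutput out, Map<K, List<V>> map,
                                       Writer<K> keyWriter, Writer<V> valueWriter) throws IOException {
        out.writeInt(map.size());
        for (Map.Entry<K, List<V>> entry : map.entrySet()) {
            keyWriter.write(out, entry.getKey());
            out.writeInt(entry.getValue().size());
            for (V value : entry.getValue()) {
                valueWriter.write(out, value);
            }
        }
    }

    // Mirrors writeMapOfLists, delegating element decoding to the supplied readers.
    static <K, V> Map<K, List<V>> readMapOfLists(DataInput in,
                                                 Reader<K> keyReader, Reader<V> valueReader) throws IOException {
        final int size = in.readInt();
        final Map<K, List<V>> map = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            final K key = keyReader.read(in);
            final int values = in.readInt();
            final List<V> list = new ArrayList<>(values);
            for (int j = 0; j < values; j++) {
                list.add(valueReader.read(in));
            }
            map.put(key, list);
        }
        return map;
    }
}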
@ -632,8 +633,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
                org.elasticsearch.repositories.RepositoryMissingException::new, 107),
        DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
                org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
        FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class,
                org.elasticsearch.index.engine.FlushNotAllowedEngineException::new, 110),
        // 110 used to be FlushNotAllowedEngineException
        NO_CLASS_SETTINGS_EXCEPTION(org.elasticsearch.common.settings.NoClassSettingsException.class,
                org.elasticsearch.common.settings.NoClassSettingsException::new, 111),
        BIND_TRANSPORT_EXCEPTION(org.elasticsearch.transport.BindTransportException.class,

@ -19,12 +19,12 @@

package org.elasticsearch;

import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.index.Index;
import org.elasticsearch.rest.RestStatus;
@ -39,7 +39,7 @@ import java.util.Set;

public final class ExceptionsHelper {

    private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
    private static final Logger logger = Loggers.getLogger(ExceptionsHelper.class);

    public static RuntimeException convertToRuntime(Exception e) {
        if (e instanceof RuntimeException) {

@ -73,6 +73,8 @@ public class Version {
    public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_2_3_5_ID = 2030599;
    public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_2_4_0_ID = 2040099;
    public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
    public static final int V_5_0_0_alpha1_ID = 5000001;
    public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
    public static final int V_5_0_0_alpha2_ID = 5000002;
@ -83,9 +85,13 @@ public class Version {
    public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
    public static final int V_5_0_0_alpha5_ID = 5000005;
    public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
    public static final int V_5_0_0_alpha6_ID = 5000006;
    public static final Version V_5_0_0_alpha6 = new Version(V_5_0_0_alpha6_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
    public static final Version CURRENT = V_5_0_0_alpha6;
    public static final int V_5_0_0_beta1_ID = 5000026;
    public static final Version V_5_0_0_beta1 = new Version(V_5_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
    public static final int V_5_0_0_rc1_ID = 5000051;
    public static final Version V_5_0_0_rc1 = new Version(V_5_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
    public static final int V_6_0_0_alpha1_ID = 6000001;
    public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
    public static final Version CURRENT = V_6_0_0_alpha1;

    static {
        assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@ -98,8 +104,12 @@ public class Version {

    public static Version fromId(int id) {
        switch (id) {
            case V_5_0_0_alpha6_ID:
                return V_5_0_0_alpha6;
            case V_6_0_0_alpha1_ID:
                return V_6_0_0_alpha1;
            case V_5_0_0_rc1_ID:
                return V_5_0_0_rc1;
            case V_5_0_0_beta1_ID:
                return V_5_0_0_beta1;
            case V_5_0_0_alpha5_ID:
                return V_5_0_0_alpha5;
            case V_5_0_0_alpha4_ID:
@ -110,6 +120,8 @@ public class Version {
                return V_5_0_0_alpha2;
            case V_5_0_0_alpha1_ID:
                return V_5_0_0_alpha1;
            case V_2_4_0_ID:
                return V_2_4_0;
            case V_2_3_5_ID:
                return V_2_3_5;
            case V_2_3_4_ID:
@ -344,4 +356,9 @@ public class Version {
    public boolean isRC() {
        return build > 50 && build < 99;
    }

    public boolean isRelease() {
        return build == 99;
    }

}

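The version IDs encode two decimal digits each for major, minor, revision, and build, which is what isRC and isRelease test against (for example, 2030599 is 2.3.5 build 99, and 5000051 is the 5.0.0 RC1 build). A sketch of that decoding, inferred from the constants above rather than copied from the source:

public final class VersionId {
    private VersionId() {}

    // Decodes IDs like 2030599 -> "2.3.5 (build 99, release)": two decimal
    // digits each for major, minor, revision, and build.
    static String describe(int id) {
        final int major = (id / 1000000) % 100;
        final int minor = (id / 10000) % 100;
        final int revision = (id / 100) % 100;
        final int build = id % 100;
        final String kind = build == 99 ? "release" : build > 50 ? "RC" : "pre-release";
        return major + "." + minor + "." + revision + " (build " + build + ", " + kind + ")";
    }

    public static void main(String[] args) {
        System.out.println(describe(2030599)); // 2.3.5 (build 99, release)
        System.out.println(describe(5000051)); // 5.0.0 (build 51, RC)
        System.out.println(describe(6000001)); // 6.0.0 (build 1, pre-release)
    }
}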
@ -20,7 +20,6 @@
package org.elasticsearch.action;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@ -289,6 +288,7 @@ import org.elasticsearch.rest.action.cat.RestSegmentsAction;
import org.elasticsearch.rest.action.cat.RestShardsAction;
import org.elasticsearch.rest.action.cat.RestSnapshotAction;
import org.elasticsearch.rest.action.cat.RestTasksAction;
import org.elasticsearch.rest.action.cat.RestTemplatesAction;
import org.elasticsearch.rest.action.cat.RestThreadPoolAction;
import org.elasticsearch.rest.action.document.RestBulkAction;
import org.elasticsearch.rest.action.document.RestDeleteAction;
@ -335,7 +335,7 @@ public class ActionModule extends AbstractModule {
        this.actionPlugins = actionPlugins;
        actions = setupActions(actionPlugins);
        actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
        autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
        autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, clusterSettings, resolver);
        destructiveOperations = new DestructiveOperations(settings, clusterSettings);
        Set<String> headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet());
        restController = new RestController(settings, headers);
@ -604,6 +604,7 @@ public class ActionModule extends AbstractModule {
        registerRestHandler(handlers, RestNodeAttrsAction.class);
        registerRestHandler(handlers, RestRepositoriesAction.class);
        registerRestHandler(handlers, RestSnapshotAction.class);
        registerRestHandler(handlers, RestTemplatesAction.class);
        for (ActionPlugin plugin : actionPlugins) {
            for (Class<? extends RestHandler> handler : plugin.getRestHandlers()) {
                registerRestHandler(handlers, handler);
@ -664,4 +665,8 @@ public class ActionModule extends AbstractModule {
            }
        }
    }

    public RestController getRestController() {
        return restController;
    }
}

@@ -37,6 +37,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
@@ -168,31 +169,35 @@ public class TransportClusterAllocationExplainAction
        if (node.getId().equals(assignedNodeId)) {
            finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
            finalExplanation = "the shard is already assigned to this node";
        } else if (hasPendingAsyncFetch &&
                shard.primary() == false &&
                shard.unassigned() &&
                shard.allocatedPostIndexCreate(indexMetaData) &&
                nodeDecision.type() != Decision.Type.YES) {
        } else if (shard.unassigned() && shard.primary() == false &&
                shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && nodeDecision.type() != Decision.Type.YES) {
            finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() +
                " decision and the shard's state is still being fetched";
                " decision";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (hasPendingAsyncFetch &&
                shard.unassigned() &&
                shard.allocatedPostIndexCreate(indexMetaData)) {
        } else if (shard.unassigned() && shard.primary() == false &&
                shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED && hasPendingAsyncFetch) {
            finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
        } else if (shard.primary() && shard.unassigned() &&
                (shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE ||
                shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT)
                && hasPendingAsyncFetch) {
            finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
                storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
            finalExplanation = "the copy of the shard is stale, allocation ids do not match";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
        } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
                storeCopy == ClusterAllocationExplanation.StoreCopy.NONE) {
            finalExplanation = "there is no copy of the shard available";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) {
        } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
                storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) {
            finalExplanation = "the copy of the shard is corrupt";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else if (shard.primary() && shard.unassigned() && storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) {
        } else if (shard.primary() && shard.unassigned() && shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
                storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) {
            finalExplanation = "the copy of the shard cannot be read";
            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
        } else {
@@ -258,7 +263,7 @@ public class TransportClusterAllocationExplainAction
            Float weight = weights.get(node);
            IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
            NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
                storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()),
                storeStatus, shard.currentNodeId(), indexMetaData.inSyncAllocationIds(shard.getId()),
                allocation.hasPendingAsyncFetch());
            explanations.put(node, nodeExplanation);
        }

@@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.health;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
@@ -41,8 +42,8 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
    private String[] indices;
    private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS);
    private ClusterHealthStatus waitForStatus;
    private int waitForRelocatingShards = -1;
    private int waitForActiveShards = -1;
    private boolean waitForNoRelocatingShards = false;
    private ActiveShardCount waitForActiveShards = ActiveShardCount.NONE;
    private String waitForNodes = "";
    private Priority waitForEvents = null;

@@ -102,24 +103,52 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
        return waitForStatus(ClusterHealthStatus.YELLOW);
    }

    public int waitForRelocatingShards() {
        return waitForRelocatingShards;
    public boolean waitForNoRelocatingShards() {
        return waitForNoRelocatingShards;
    }

    public ClusterHealthRequest waitForRelocatingShards(int waitForRelocatingShards) {
        this.waitForRelocatingShards = waitForRelocatingShards;
    /**
     * Sets whether the request should wait for there to be no relocating shards before
     * retrieving the cluster health status. Defaults to {@code false}, meaning the
     * operation does not wait on there being no more relocating shards. Set to <code>true</code>
     * to wait until the number of relocating shards in the cluster is 0.
     */
    public ClusterHealthRequest waitForNoRelocatingShards(boolean waitForNoRelocatingShards) {
        this.waitForNoRelocatingShards = waitForNoRelocatingShards;
        return this;
    }

    public int waitForActiveShards() {
    public ActiveShardCount waitForActiveShards() {
        return waitForActiveShards;
    }

    public ClusterHealthRequest waitForActiveShards(int waitForActiveShards) {
        this.waitForActiveShards = waitForActiveShards;
    /**
     * Sets the number of shard copies that must be active across all indices before getting the
     * health status. Defaults to {@link ActiveShardCount#NONE}, meaning we don't wait on any active shards.
     * Set this value to {@link ActiveShardCount#ALL} to wait for all shards (primary and
     * all replicas) to be active across all indices in the cluster. Otherwise, use
     * {@link ActiveShardCount#from(int)} to set this value to any non-negative integer, up to the
     * total number of shard copies to wait for.
     */
    public ClusterHealthRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
        if (waitForActiveShards.equals(ActiveShardCount.DEFAULT)) {
            // the default for cluster health request is 0, not 1
            this.waitForActiveShards = ActiveShardCount.NONE;
        } else {
            this.waitForActiveShards = waitForActiveShards;
        }
        return this;
    }

    /**
     * A shortcut for {@link #waitForActiveShards(ActiveShardCount)} where the numerical
     * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
     * to get the ActiveShardCount.
     */
    public ClusterHealthRequest waitForActiveShards(final int waitForActiveShards) {
        return waitForActiveShards(ActiveShardCount.from(waitForActiveShards));
    }

    public String waitForNodes() {
        return waitForNodes;
    }
@@ -162,8 +191,8 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
        if (in.readBoolean()) {
            waitForStatus = ClusterHealthStatus.fromValue(in.readByte());
        }
        waitForRelocatingShards = in.readInt();
        waitForActiveShards = in.readInt();
        waitForNoRelocatingShards = in.readBoolean();
        waitForActiveShards = ActiveShardCount.readFrom(in);
        waitForNodes = in.readString();
        if (in.readBoolean()) {
            waitForEvents = Priority.readFrom(in);
@@ -188,8 +217,8 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
            out.writeBoolean(true);
            out.writeByte(waitForStatus.value());
        }
        out.writeInt(waitForRelocatingShards);
        out.writeInt(waitForActiveShards);
        out.writeBoolean(waitForNoRelocatingShards);
        waitForActiveShards.writeTo(out);
        out.writeString(waitForNodes);
        if (waitForEvents == null) {
            out.writeBoolean(false);

@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.cluster.health;

import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
@@ -64,11 +65,40 @@ public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestB
        return this;
    }

    public ClusterHealthRequestBuilder setWaitForRelocatingShards(int waitForRelocatingShards) {
        request.waitForRelocatingShards(waitForRelocatingShards);
    /**
     * Sets whether the request should wait for there to be no relocating shards before
     * retrieving the cluster health status. Defaults to <code>false</code>, meaning the
     * operation does not wait on there being no more relocating shards. Set to <code>true</code>
     * to wait until the number of relocating shards in the cluster is 0.
     */
    public ClusterHealthRequestBuilder setWaitForNoRelocatingShards(boolean waitForRelocatingShards) {
        request.waitForNoRelocatingShards(waitForRelocatingShards);
        return this;
    }

    /**
     * Sets the number of shard copies that must be active before getting the health status.
     * Defaults to {@link ActiveShardCount#NONE}, meaning we don't wait on any active shards.
     * Set this value to {@link ActiveShardCount#ALL} to wait for all shards (primary and
     * all replicas) to be active across all indices in the cluster. Otherwise, use
     * {@link ActiveShardCount#from(int)} to set this value to any non-negative integer, up to the
     * total number of shard copies that would exist across all indices in the cluster.
     */
    public ClusterHealthRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
        if (waitForActiveShards.equals(ActiveShardCount.DEFAULT)) {
            // the default for cluster health is 0, not 1
            request.waitForActiveShards(ActiveShardCount.NONE);
        } else {
            request.waitForActiveShards(waitForActiveShards);
        }
        return this;
    }

    /**
     * A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical
     * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
     * to get the ActiveShardCount.
     */
    public ClusterHealthRequestBuilder setWaitForActiveShards(int waitForActiveShards) {
        request.waitForActiveShards(waitForActiveShards);
        return this;

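With these builder changes, callers that previously passed integer sentinels migrate to the boolean and ActiveShardCount forms. A hedged usage sketch, assuming an existing Client instance named client:

    // Wait for zero relocating shards and for every shard copy to be active.
    ClusterHealthResponse health = client.admin().cluster().prepareHealth()
            .setWaitForNoRelocatingShards(true)            // replaces setWaitForRelocatingShards(0)
            .setWaitForActiveShards(ActiveShardCount.ALL)  // or an explicit non-negative count
            .get();
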
@@ -19,8 +19,11 @@

package org.elasticsearch.action.admin.cluster.health;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterState;
@@ -105,7 +108,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<

            @Override
            public void onFailure(String source, Exception e) {
                logger.error("unexpected failure during [{}]", e, source);
                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
                listener.onFailure(e);
            }

@@ -125,10 +128,10 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
        if (request.waitForStatus() == null) {
            waitFor--;
        }
        if (request.waitForRelocatingShards() == -1) {
        if (request.waitForNoRelocatingShards() == false) {
            waitFor--;
        }
        if (request.waitForActiveShards() == -1) {
        if (request.waitForActiveShards().equals(ActiveShardCount.NONE)) {
            waitFor--;
        }
        if (request.waitForNodes().isEmpty()) {
@@ -203,11 +206,22 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
        if (request.waitForStatus() != null && response.getStatus().value() <= request.waitForStatus().value()) {
            waitForCounter++;
        }
        if (request.waitForRelocatingShards() != -1 && response.getRelocatingShards() <= request.waitForRelocatingShards()) {
        if (request.waitForNoRelocatingShards() && response.getRelocatingShards() == 0) {
            waitForCounter++;
        }
        if (request.waitForActiveShards() != -1 && response.getActiveShards() >= request.waitForActiveShards()) {
            waitForCounter++;
        if (request.waitForActiveShards().equals(ActiveShardCount.NONE) == false) {
            ActiveShardCount waitForActiveShards = request.waitForActiveShards();
            assert waitForActiveShards.equals(ActiveShardCount.DEFAULT) == false :
                "waitForActiveShards must not be DEFAULT on the request object, instead it should be NONE";
            if (waitForActiveShards.equals(ActiveShardCount.ALL)
                && response.getUnassignedShards() == 0
                && response.getInitializingShards() == 0) {
                // if we are waiting for all shards to be active, then the num of unassigned and num of initializing shards must be 0
                waitForCounter++;
            } else if (waitForActiveShards.enoughShardsActive(response.getActiveShards())) {
                // there are enough active shards to meet the requirements of the request
                waitForCounter++;
            }
        }
        if (request.indices() != null && request.indices().length > 0) {
            try {

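The logging change seen here (and repeated in the reroute and settings actions below) is the move to Log4j 2: the throwable becomes the last argument, and the message is wrapped in a Supplier so it is only formatted when the level is enabled. A self-contained sketch of the idiom (demo class name is illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.message.ParameterizedMessage;
    import org.apache.logging.log4j.util.Supplier;

    class LoggingIdiomDemo {
        private static final Logger logger = LogManager.getLogger(LoggingIdiomDemo.class);

        void onFailure(String source, Exception e) {
            // Message construction is deferred until the error level is enabled.
            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
        }
    }
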
@@ -37,10 +37,6 @@ import org.elasticsearch.threadpool.ThreadPoolInfo;
import org.elasticsearch.transport.TransportInfo;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static java.util.Collections.unmodifiableMap;

/**
 * Node information (static, does not change over time).
@@ -85,8 +81,8 @@ public class NodeInfo extends BaseNodeResponse {

    public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings,
                    @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest,
                    @Nullable ByteSizeValue totalIndexingBuffer) {
                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins,
                    @Nullable IngestInfo ingest, @Nullable ByteSizeValue totalIndexingBuffer) {
        super(node);
        this.version = version;
        this.build = build;
@@ -205,31 +201,14 @@ public class NodeInfo extends BaseNodeResponse {
        if (in.readBoolean()) {
            settings = Settings.readSettingsFromStream(in);
        }
        if (in.readBoolean()) {
            os = OsInfo.readOsInfo(in);
        }
        if (in.readBoolean()) {
            process = ProcessInfo.readProcessInfo(in);
        }
        if (in.readBoolean()) {
            jvm = JvmInfo.readJvmInfo(in);
        }
        if (in.readBoolean()) {
            threadPool = ThreadPoolInfo.readThreadPoolInfo(in);
        }
        if (in.readBoolean()) {
            transport = TransportInfo.readTransportInfo(in);
        }
        if (in.readBoolean()) {
            http = HttpInfo.readHttpInfo(in);
        }
        if (in.readBoolean()) {
            plugins = new PluginsAndModules();
            plugins.readFrom(in);
        }
        if (in.readBoolean()) {
            ingest = new IngestInfo(in);
        }
        os = in.readOptionalWriteable(OsInfo::new);
        process = in.readOptionalWriteable(ProcessInfo::new);
        jvm = in.readOptionalWriteable(JvmInfo::new);
        threadPool = in.readOptionalWriteable(ThreadPoolInfo::new);
        transport = in.readOptionalWriteable(TransportInfo::new);
        http = in.readOptionalWriteable(HttpInfo::new);
        plugins = in.readOptionalWriteable(PluginsAndModules::new);
        ingest = in.readOptionalWriteable(IngestInfo::new);
    }

    @Override
@@ -241,7 +220,7 @@ public class NodeInfo extends BaseNodeResponse {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeLong(totalIndexingBuffer.bytes());
            out.writeLong(totalIndexingBuffer.getBytes());
        }
        if (settings == null) {
            out.writeBoolean(false);
@@ -249,53 +228,13 @@ public class NodeInfo extends BaseNodeResponse {
            out.writeBoolean(true);
            Settings.writeSettingsToStream(settings, out);
        }
        if (os == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            os.writeTo(out);
        }
        if (process == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            process.writeTo(out);
        }
        if (jvm == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            jvm.writeTo(out);
        }
        if (threadPool == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            threadPool.writeTo(out);
        }
        if (transport == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            transport.writeTo(out);
        }
        if (http == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            http.writeTo(out);
        }
        if (plugins == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            plugins.writeTo(out);
        }
        if (ingest == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            ingest.writeTo(out);
        }
        out.writeOptionalWriteable(os);
        out.writeOptionalWriteable(process);
        out.writeOptionalWriteable(jvm);
        out.writeOptionalWriteable(threadPool);
        out.writeOptionalWriteable(transport);
        out.writeOptionalWriteable(http);
        out.writeOptionalWriteable(plugins);
        out.writeOptionalWriteable(ingest);
    }
}

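The rewritten serialization relies on the optional-writeable helpers keeping the same wire format as the hand-rolled blocks they replace: a boolean presence flag followed by the value. A sketch of that contract (illustrative only, not the actual StreamOutput implementation):

    // writeOptionalWriteable(value) behaves like:
    if (value == null) {
        out.writeBoolean(false);
    } else {
        out.writeBoolean(true);
        value.writeTo(out);
    }
    // and readOptionalWriteable(reader) reads the flag and, only if true, invokes the reader.
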
@@ -21,7 +21,7 @@ package org.elasticsearch.action.admin.cluster.node.info;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.plugins.PluginInfo;
@@ -34,13 +34,24 @@ import java.util.List;
/**
 * Information about plugins and modules
 */
public class PluginsAndModules implements Streamable, ToXContent {
    private List<PluginInfo> plugins;
    private List<PluginInfo> modules;
public class PluginsAndModules implements Writeable, ToXContent {
    private final List<PluginInfo> plugins;
    private final List<PluginInfo> modules;

    public PluginsAndModules() {
        plugins = new ArrayList<>();
        modules = new ArrayList<>();
    public PluginsAndModules(List<PluginInfo> plugins, List<PluginInfo> modules) {
        this.plugins = Collections.unmodifiableList(plugins);
        this.modules = Collections.unmodifiableList(modules);
    }

    public PluginsAndModules(StreamInput in) throws IOException {
        this.plugins = Collections.unmodifiableList(in.readList(PluginInfo::new));
        this.modules = Collections.unmodifiableList(in.readList(PluginInfo::new));
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeList(plugins);
        out.writeList(modules);
    }

    /**
@@ -69,33 +80,6 @@ public class PluginsAndModules implements Streamable, ToXContent {
        modules.add(info);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        if (plugins.isEmpty() == false || modules.isEmpty() == false) {
            throw new IllegalStateException("instance is already populated");
        }
        int plugins_size = in.readInt();
        for (int i = 0; i < plugins_size; i++) {
            plugins.add(PluginInfo.readFromStream(in));
        }
        int modules_size = in.readInt();
        for (int i = 0; i < modules_size; i++) {
            modules.add(PluginInfo.readFromStream(in));
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeInt(plugins.size());
        for (PluginInfo plugin : getPluginInfos()) {
            plugin.writeTo(out);
        }
        out.writeInt(modules.size());
        for (PluginInfo module : getModuleInfos()) {
            module.writeTo(out);
        }
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startArray("plugins");

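Because the class is now an immutable Writeable, a round trip goes through the new StreamInput constructor rather than readFrom(). A test-style sketch, assuming pluginInfos and moduleInfos lists are in scope and using Elasticsearch's in-memory BytesStreamOutput:

    PluginsAndModules original = new PluginsAndModules(pluginInfos, moduleInfos);
    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);
    PluginsAndModules copy = new PluginsAndModules(out.bytes().streamInput());
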
@@ -211,30 +211,16 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
        if (in.readBoolean()) {
            indices = NodeIndicesStats.readIndicesStats(in);
        }
        if (in.readBoolean()) {
            os = OsStats.readOsStats(in);
        }
        if (in.readBoolean()) {
            process = ProcessStats.readProcessStats(in);
        }
        if (in.readBoolean()) {
            jvm = JvmStats.readJvmStats(in);
        }
        if (in.readBoolean()) {
            threadPool = ThreadPoolStats.readThreadPoolStats(in);
        }
        if (in.readBoolean()) {
            fs = new FsInfo(in);
        }
        if (in.readBoolean()) {
            transport = TransportStats.readTransportStats(in);
        }
        if (in.readBoolean()) {
            http = HttpStats.readHttpStats(in);
        }
        breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
        scriptStats = in.readOptionalStreamable(ScriptStats::new);
        discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
        os = in.readOptionalWriteable(OsStats::new);
        process = in.readOptionalWriteable(ProcessStats::new);
        jvm = in.readOptionalWriteable(JvmStats::new);
        threadPool = in.readOptionalWriteable(ThreadPoolStats::new);
        fs = in.readOptionalWriteable(FsInfo::new);
        transport = in.readOptionalWriteable(TransportStats::new);
        http = in.readOptionalWriteable(HttpStats::new);
        breaker = in.readOptionalWriteable(AllCircuitBreakerStats::new);
        scriptStats = in.readOptionalWriteable(ScriptStats::new);
        discoveryStats = in.readOptionalWriteable(DiscoveryStats::new);
        ingestStats = in.readOptionalWriteable(IngestStats::new);
    }
@@ -248,51 +234,16 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
            out.writeBoolean(true);
            indices.writeTo(out);
        }
        if (os == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            os.writeTo(out);
        }
        if (process == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            process.writeTo(out);
        }
        if (jvm == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            jvm.writeTo(out);
        }
        if (threadPool == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            threadPool.writeTo(out);
        }
        if (fs == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            fs.writeTo(out);
        }
        if (transport == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            transport.writeTo(out);
        }
        if (http == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            http.writeTo(out);
        }
        out.writeOptionalStreamable(breaker);
        out.writeOptionalStreamable(scriptStats);
        out.writeOptionalStreamable(discoveryStats);
        out.writeOptionalWriteable(os);
        out.writeOptionalWriteable(process);
        out.writeOptionalWriteable(jvm);
        out.writeOptionalWriteable(threadPool);
        out.writeOptionalWriteable(fs);
        out.writeOptionalWriteable(transport);
        out.writeOptionalWriteable(http);
        out.writeOptionalWriteable(breaker);
        out.writeOptionalWriteable(scriptStats);
        out.writeOptionalWriteable(discoveryStats);
        out.writeOptionalWriteable(ingestStats);
    }

@@ -318,11 +269,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
            builder.endObject();
        }
    }

        if (getIndices() != null) {
            getIndices().toXContent(builder, params);
        }

        if (getOs() != null) {
            getOs().toXContent(builder, params);
        }
@@ -350,15 +299,12 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
        if (getScriptStats() != null) {
            getScriptStats().toXContent(builder, params);
        }

        if (getDiscoveryStats() != null) {
            getDiscoveryStats().toXContent(builder, params);
        }

        if (getIngestStats() != null) {
            getIngestStats().toXContent(builder, params);
        }

        return builder;
    }
}

@@ -268,7 +268,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        indices = CommonStatsFlags.readCommonStatsFlags(in);
        indices = new CommonStatsFlags(in);
        os = in.readBoolean();
        process = in.readBoolean();
        jvm = in.readBoolean();
@@ -298,5 +298,4 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        out.writeBoolean(discovery);
        out.writeBoolean(ingest);
    }

}

@@ -216,7 +216,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
            public void onFailure(Exception e) {
                if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
                    // We haven't yet created the index for the task results so it can't be found.
                    listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or stored its results", e,
                    listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e,
                        request.getTaskId()));
                } else {
                    listener.onFailure(e);

@@ -19,6 +19,9 @@

package org.elasticsearch.action.admin.cluster.reroute;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -28,12 +31,10 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -77,13 +78,13 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu

        private final ClusterRerouteRequest request;
        private final ActionListener<ClusterRerouteResponse> listener;
        private final ESLogger logger;
        private final Logger logger;
        private final AllocationService allocationService;
        private volatile ClusterState clusterStateToSend;
        private volatile RoutingExplanations explanations;

        ClusterRerouteResponseAckedClusterStateUpdateTask(ESLogger logger, AllocationService allocationService, ClusterRerouteRequest request,
                                                          ActionListener<ClusterRerouteResponse> listener) {
        ClusterRerouteResponseAckedClusterStateUpdateTask(Logger logger, AllocationService allocationService, ClusterRerouteRequest request,
                                                          ActionListener<ClusterRerouteResponse> listener) {
            super(Priority.IMMEDIATE, request, listener);
            this.request = request;
            this.listener = listener;
@@ -103,21 +104,20 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu

        @Override
        public void onFailure(String source, Exception e) {
            logger.debug("failed to perform [{}]", e, source);
            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
            super.onFailure(source, e);
        }

        @Override
        public ClusterState execute(ClusterState currentState) {
            RoutingAllocation.Result routingResult = allocationService.reroute(currentState, request.getCommands(), request.explain(),
                request.isRetryFailed());
            ClusterState newState = ClusterState.builder(currentState).routingResult(routingResult).build();
            clusterStateToSend = newState;
            explanations = routingResult.explanations();
            AllocationService.CommandsResult commandsResult =
                allocationService.reroute(currentState, request.getCommands(), request.explain(), request.isRetryFailed());
            clusterStateToSend = commandsResult.getClusterState();
            explanations = commandsResult.explanations();
            if (request.dryRun()) {
                return currentState;
            }
            return newState;
            return commandsResult.getClusterState();
        }
    }
}

@@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.cluster.settings;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
@@ -31,7 +33,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
@@ -148,25 +149,21 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
            @Override
            public void onFailure(String source, Exception e) {
                //if the reroute fails we only log
                logger.debug("failed to perform [{}]", e, source);
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
                listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
            }

            @Override
            public ClusterState execute(final ClusterState currentState) {
                // now, reroute in case things that require it changed (e.g. number of replicas)
                RoutingAllocation.Result routingResult = allocationService.reroute(currentState, "reroute after cluster update settings");
                if (!routingResult.changed()) {
                    return currentState;
                }
                return ClusterState.builder(currentState).routingResult(routingResult).build();
                return allocationService.reroute(currentState, "reroute after cluster update settings");
            }
        });
    }

    @Override
    public void onFailure(String source, Exception e) {
        logger.debug("failed to perform [{}]", e, source);
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
        super.onFailure(source, e);
    }

@@ -38,6 +38,8 @@ public class SnapshotsStatusRequest extends MasterNodeRequest<SnapshotsStatusReq

    private String[] snapshots = Strings.EMPTY_ARRAY;

    private boolean ignoreUnavailable;

    public SnapshotsStatusRequest() {
    }

@@ -112,11 +114,33 @@ public class SnapshotsStatusRequest extends MasterNodeRequest<SnapshotsStatusReq
        return this;
    }

    /**
     * Set to <code>true</code> to ignore unavailable snapshots, instead of throwing an exception.
     * Defaults to <code>false</code>, which means unavailable snapshots cause an exception to be thrown.
     *
     * @param ignoreUnavailable whether to ignore unavailable snapshots
     * @return this request
     */
    public SnapshotsStatusRequest ignoreUnavailable(boolean ignoreUnavailable) {
        this.ignoreUnavailable = ignoreUnavailable;
        return this;
    }

    /**
     * Returns whether the request permits unavailable snapshots to be ignored.
     *
     * @return true if the request will ignore unavailable snapshots, false if it will throw an exception on unavailable snapshots
     */
    public boolean ignoreUnavailable() {
        return ignoreUnavailable;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        repository = in.readString();
        snapshots = in.readStringArray();
        ignoreUnavailable = in.readBoolean();
    }

    @Override
@@ -124,5 +148,6 @@ public class SnapshotsStatusRequest extends MasterNodeRequest<SnapshotsStatusReq
        super.writeTo(out);
        out.writeString(repository);
        out.writeStringArray(snapshots);
        out.writeBoolean(ignoreUnavailable);
    }
}

@@ -74,4 +74,16 @@ public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBui
        request.snapshots(ArrayUtils.concat(request.snapshots(), snapshots));
        return this;
    }

    /**
     * Set to <code>true</code> to ignore unavailable snapshots, instead of throwing an exception.
     * Defaults to <code>false</code>, which means unavailable snapshots cause an exception to be thrown.
     *
     * @param ignoreUnavailable whether to ignore unavailable snapshots.
     * @return this builder
     */
    public SnapshotsStatusRequestBuilder setIgnoreUnavailable(boolean ignoreUnavailable) {
        request.ignoreUnavailable(ignoreUnavailable);
        return this;
    }
}

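Usage sketch for the new flag, assuming an existing Client named client and a repository called my_repo (both names are illustrative):

    SnapshotsStatusResponse status = client.admin().cluster().prepareSnapshotStatus("my_repo")
            .setSnapshots("snap_1", "snap_2")
            .setIgnoreUnavailable(true)   // skip missing snapshots instead of failing
            .get();
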
@@ -214,7 +214,14 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                SnapshotId snapshotId = matchedSnapshotIds.get(snapshotName);
                if (snapshotId == null) {
                    // neither in the current snapshot entries nor found in the repository
                    throw new SnapshotMissingException(repositoryName, snapshotName);
                    if (request.ignoreUnavailable()) {
                        // ignoring unavailable snapshots, so skip over
                        logger.debug("snapshot status request ignoring snapshot [{}], not found in repository [{}]",
                            snapshotName, repositoryName);
                        continue;
                    } else {
                        throw new SnapshotMissingException(repositoryName, snapshotName);
                    }
                }
                SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId);
                List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();

@@ -21,11 +21,12 @@ package org.elasticsearch.action.admin.cluster.stats;

import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectIntCursor;

import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -39,11 +40,13 @@ import org.elasticsearch.plugins.PluginInfo;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

public class ClusterStatsNodes implements ToXContent {

@@ -54,6 +57,7 @@ public class ClusterStatsNodes implements ToXContent {
    private final JvmStats jvm;
    private final FsInfo.Path fs;
    private final Set<PluginInfo> plugins;
    private final NetworkTypes networkTypes;

    ClusterStatsNodes(List<ClusterStatsNodeResponse> nodeResponses) {
        this.versions = new HashSet<>();
@@ -79,13 +83,14 @@ public class ClusterStatsNodes implements ToXContent {
                continue;
            }
            if (nodeResponse.nodeStats().getFs() != null) {
                this.fs.add(nodeResponse.nodeStats().getFs().total());
                this.fs.add(nodeResponse.nodeStats().getFs().getTotal());
            }
        }
        this.counts = new Counts(nodeInfos);
        this.os = new OsStats(nodeInfos);
        this.os = new OsStats(nodeInfos, nodeStats);
        this.process = new ProcessStats(nodeStats);
        this.jvm = new JvmStats(nodeInfos, nodeStats);
        this.networkTypes = new NetworkTypes(nodeInfos);
    }

    public Counts getCounts() {
@@ -124,6 +129,7 @@ public class ClusterStatsNodes implements ToXContent {
        static final String JVM = "jvm";
        static final String FS = "fs";
        static final String PLUGINS = "plugins";
        static final String NETWORK_TYPES = "network_types";
    }

    @Override
@@ -158,6 +164,10 @@ public class ClusterStatsNodes implements ToXContent {
            pluginInfo.toXContent(builder, params);
        }
        builder.endArray();

        builder.startObject(Fields.NETWORK_TYPES);
        networkTypes.toXContent(builder, params);
        builder.endObject();
        return builder;
    }

@@ -216,11 +226,12 @@ public class ClusterStatsNodes implements ToXContent {
        final int availableProcessors;
        final int allocatedProcessors;
        final ObjectIntHashMap<String> names;
        final org.elasticsearch.monitor.os.OsStats.Mem mem;

        /**
         * Build the stats from information about each node.
         */
        private OsStats(List<NodeInfo> nodeInfos) {
        private OsStats(List<NodeInfo> nodeInfos, List<NodeStats> nodeStatsList) {
            this.names = new ObjectIntHashMap<>();
            int availableProcessors = 0;
            int allocatedProcessors = 0;
@@ -234,6 +245,22 @@ public class ClusterStatsNodes implements ToXContent {
            }
            this.availableProcessors = availableProcessors;
            this.allocatedProcessors = allocatedProcessors;

            long totalMemory = 0;
            long freeMemory = 0;
            for (NodeStats nodeStats : nodeStatsList) {
                if (nodeStats.getOs() != null) {
                    long total = nodeStats.getOs().getMem().getTotal().getBytes();
                    if (total > 0) {
                        totalMemory += total;
                    }
                    long free = nodeStats.getOs().getMem().getFree().getBytes();
                    if (free > 0) {
                        freeMemory += free;
                    }
                }
            }
            this.mem = new org.elasticsearch.monitor.os.OsStats.Mem(totalMemory, freeMemory);
        }

        public int getAvailableProcessors() {
@@ -244,6 +271,10 @@ public class ClusterStatsNodes implements ToXContent {
            return allocatedProcessors;
        }

        public org.elasticsearch.monitor.os.OsStats.Mem getMem() {
            return mem;
        }

        static final class Fields {
            static final String AVAILABLE_PROCESSORS = "available_processors";
            static final String ALLOCATED_PROCESSORS = "allocated_processors";
@@ -264,6 +295,7 @@ public class ClusterStatsNodes implements ToXContent {
                builder.endObject();
            }
            builder.endArray();
            mem.toXContent(builder, params);
            return builder;
        }
    }
@@ -391,8 +423,8 @@ public class ClusterStatsNodes implements ToXContent {
            }
            maxUptime = Math.max(maxUptime, js.getUptime().millis());
            if (js.getMem() != null) {
                heapUsed += js.getMem().getHeapUsed().bytes();
                heapMax += js.getMem().getHeapMax().bytes();
                heapUsed += js.getMem().getHeapUsed().getBytes();
                heapMax += js.getMem().getHeapMax().getBytes();
            }
        }
        this.threads = threads;
@@ -506,4 +538,43 @@ public class ClusterStatsNodes implements ToXContent {
            return vmVersion.hashCode();
        }
    }

    static class NetworkTypes implements ToXContent {

        private final Map<String, AtomicInteger> transportTypes;
        private final Map<String, AtomicInteger> httpTypes;

        private NetworkTypes(final List<NodeInfo> nodeInfos) {
            final Map<String, AtomicInteger> transportTypes = new HashMap<>();
            final Map<String, AtomicInteger> httpTypes = new HashMap<>();
            for (final NodeInfo nodeInfo : nodeInfos) {
                final Settings settings = nodeInfo.getSettings();
                final String transportType =
                    settings.get(NetworkModule.TRANSPORT_TYPE_KEY, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
                final String httpType =
                    settings.get(NetworkModule.HTTP_TYPE_KEY, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
                transportTypes.computeIfAbsent(transportType, k -> new AtomicInteger()).incrementAndGet();
                httpTypes.computeIfAbsent(httpType, k -> new AtomicInteger()).incrementAndGet();
            }
            this.transportTypes = Collections.unmodifiableMap(transportTypes);
            this.httpTypes = Collections.unmodifiableMap(httpTypes);
        }

        @Override
        public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
            builder.startObject("transport_types");
            for (final Map.Entry<String, AtomicInteger> entry : transportTypes.entrySet()) {
                builder.field(entry.getKey(), entry.getValue().get());
            }
            builder.endObject();
            builder.startObject("http_types");
            for (final Map.Entry<String, AtomicInteger> entry : httpTypes.entrySet()) {
                builder.field(entry.getKey(), entry.getValue().get());
            }
            builder.endObject();
            return builder;
        }

    }

}

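The NetworkTypes tally uses the standard computeIfAbsent-plus-AtomicInteger counting idiom: the first sighting of a key installs a zero counter, and every sighting increments it. A standalone sketch with made-up input values:

    Map<String, AtomicInteger> counts = new HashMap<>();
    for (String type : Arrays.asList("netty4", "netty4", "local")) {
        counts.computeIfAbsent(type, k -> new AtomicInteger()).incrementAndGet();
    }
    // counts now holds {netty4=2, local=1}
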
@@ -91,8 +91,8 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta

    @Override
    protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
        NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false, false);
        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false, false);
        NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false);
        NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, true, true, true, false, true, false, false, false, false, false, false);
        List<ShardStats> shardsStats = new ArrayList<>();
        for (IndexService indexService : indicesService) {
            for (IndexShard indexShard : indexService) {

@@ -21,29 +21,22 @@ package org.elasticsearch.action.admin.indices.alias;
import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
import org.elasticsearch.cluster.metadata.AliasAction;

import java.util.List;

/**
 * Cluster state update request that allows to add or remove aliases
 */
public class IndicesAliasesClusterStateUpdateRequest extends ClusterStateUpdateRequest<IndicesAliasesClusterStateUpdateRequest> {
    private final List<AliasAction> actions;

    AliasAction[] actions;

    public IndicesAliasesClusterStateUpdateRequest() {

    public IndicesAliasesClusterStateUpdateRequest(List<AliasAction> actions) {
        this.actions = actions;
    }

    /**
     * Returns the alias actions to be performed
     */
    public AliasAction[] actions() {
    public List<AliasAction> actions() {
        return actions;
    }

    /**
     * Sets the alias actions to be executed
     */
    public IndicesAliasesClusterStateUpdateRequest actions(AliasAction[] actions) {
        this.actions = actions;
        return this;
    }
}

@@ -20,6 +20,8 @@
package org.elasticsearch.action.admin.indices.alias;

import com.carrotsearch.hppc.cursors.ObjectCursor;

import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.AliasesRequest;
import org.elasticsearch.action.CompositeIndicesRequest;
@@ -27,30 +29,41 @@ import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.cluster.metadata.AliasAction.Type;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.function.Supplier;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.cluster.metadata.AliasAction.readAliasAction;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
import static org.elasticsearch.common.xcontent.ObjectParser.fromList;

/**
 * A request to add/remove aliases for one or more indices.
 */
public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> implements CompositeIndicesRequest {

    private List<AliasActions> allAliasActions = new ArrayList<>();

    //indices options that require every specified index to exist, expand wildcards only to open indices and
@@ -61,94 +74,317 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq

    }

    /*
     * Aliases can be added by passing multiple indices to the Request and
     * deleted by passing multiple indices and aliases. They are expanded into
     * distinct AliasAction instances when the request is processed. This class
     * holds the AliasAction and in addition the arrays or alias names and
     * indices that is later used to create the final AliasAction instances.
    /**
     * Request to take one or more actions on one or more indexes and alias combinations.
     */
    public static class AliasActions implements AliasesRequest {
        private String[] indices = Strings.EMPTY_ARRAY;
    public static class AliasActions implements AliasesRequest, Writeable {
        public enum Type {
            ADD((byte) 0),
            REMOVE((byte) 1),
            REMOVE_INDEX((byte) 2);

            private final byte value;

            Type(byte value) {
                this.value = value;
            }

            public byte value() {
                return value;
            }

            public static Type fromValue(byte value) {
                switch (value) {
                    case 0: return ADD;
                    case 1: return REMOVE;
                    case 2: return REMOVE_INDEX;
                    default: throw new IllegalArgumentException("No type for action [" + value + "]");
                }
            }
        }

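The byte value is what travels over the wire in the Writeable methods further down, so value() and fromValue(byte) must stay inverses. A tiny sketch of the round trip:

    byte wire = AliasActions.Type.REMOVE_INDEX.value();            // 2
    AliasActions.Type decoded = AliasActions.Type.fromValue(wire); // REMOVE_INDEX
    assert decoded == AliasActions.Type.REMOVE_INDEX;
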
/**
|
||||
* Build a new {@link AliasAction} to add aliases.
|
||||
*/
|
||||
public static AliasActions add() {
|
||||
return new AliasActions(AliasActions.Type.ADD);
|
||||
}
|
||||
/**
|
||||
* Build a new {@link AliasAction} to remove aliases.
|
||||
*/
|
||||
public static AliasActions remove() {
|
||||
return new AliasActions(AliasActions.Type.REMOVE);
|
||||
}
|
||||
/**
|
||||
* Build a new {@link AliasAction} to remove aliases.
|
||||
*/
|
||||
public static AliasActions removeIndex() {
|
||||
return new AliasActions(AliasActions.Type.REMOVE_INDEX);
|
||||
}
|
||||
private static ObjectParser<AliasActions, ParseFieldMatcherSupplier> parser(String name, Supplier<AliasActions> supplier) {
|
||||
ObjectParser<AliasActions, ParseFieldMatcherSupplier> parser = new ObjectParser<>(name, supplier);
|
||||
parser.declareString((action, index) -> {
|
||||
if (action.indices() != null) {
|
||||
throw new IllegalArgumentException("Only one of [index] and [indices] is supported");
|
||||
}
|
||||
action.index(index);
|
||||
}, new ParseField("index"));
|
||||
parser.declareStringArray(fromList(String.class, (action, indices) -> {
|
||||
if (action.indices() != null) {
|
||||
throw new IllegalArgumentException("Only one of [index] and [indices] is supported");
|
||||
}
|
||||
action.indices(indices);
|
||||
}), new ParseField("indices"));
|
||||
parser.declareString((action, alias) -> {
|
||||
if (action.aliases() != null && action.aliases().length != 0) {
|
||||
throw new IllegalArgumentException("Only one of [alias] and [aliases] is supported");
|
||||
}
|
||||
action.alias(alias);
|
||||
}, new ParseField("alias"));
|
||||
parser.declareStringArray(fromList(String.class, (action, aliases) -> {
|
||||
if (action.aliases() != null && action.aliases().length != 0) {
|
||||
throw new IllegalArgumentException("Only one of [alias] and [aliases] is supported");
|
||||
}
|
||||
action.aliases(aliases);
|
||||
}), new ParseField("aliases"));
|
||||
return parser;
|
||||
}
|
||||
|
||||
private static final ObjectParser<AliasActions, ParseFieldMatcherSupplier> ADD_PARSER = parser("add", AliasActions::add);
|
||||
static {
|
||||
ADD_PARSER.declareObject(AliasActions::filter, (parser, m) -> {
|
||||
try {
|
||||
return parser.mapOrdered();
|
||||
} catch (IOException e) {
|
||||
throw new ParsingException(parser.getTokenLocation(), "Problems parsing [filter]", e);
|
||||
}
|
||||
}, new ParseField("filter"));
|
||||
// Since we need to support numbers AND strings here we have to use ValueType.INT.
|
||||
ADD_PARSER.declareField(AliasActions::routing, p -> p.text(), new ParseField("routing"), ValueType.INT);
|
||||
ADD_PARSER.declareField(AliasActions::indexRouting, p -> p.text(), new ParseField("index_routing"), ValueType.INT);
|
||||
ADD_PARSER.declareField(AliasActions::searchRouting, p -> p.text(), new ParseField("search_routing"), ValueType.INT);
|
||||
}
|
||||
private static final ObjectParser<AliasActions, ParseFieldMatcherSupplier> REMOVE_PARSER = parser("remove", AliasActions::remove);
|
||||
private static final ObjectParser<AliasActions, ParseFieldMatcherSupplier> REMOVE_INDEX_PARSER = parser("remove_index",
|
||||
AliasActions::removeIndex);
|
||||
|
||||
/**
|
||||
* Parser for any one {@link AliasAction}.
|
||||
*/
|
||||
public static final ConstructingObjectParser<AliasActions, ParseFieldMatcherSupplier> PARSER = new ConstructingObjectParser<>(
|
||||
"alias_action", a -> {
|
||||
// Take the first action and complain if there are more than one actions
|
||||
AliasActions action = null;
|
||||
for (Object o : a) {
|
||||
if (o != null) {
|
||||
if (action == null) {
|
||||
action = (AliasActions) o;
|
||||
} else {
|
||||
throw new IllegalArgumentException("Too many operations declared in on opeation entry");
}
}
}
return action;
});
static {
PARSER.declareObject(optionalConstructorArg(), ADD_PARSER, new ParseField("add"));
PARSER.declareObject(optionalConstructorArg(), REMOVE_PARSER, new ParseField("remove"));
PARSER.declareObject(optionalConstructorArg(), REMOVE_INDEX_PARSER, new ParseField("remove_index"));
}
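
For orientation, the ParseField declarations above correspond one-to-one to the fluent setters on AliasActions. A minimal sketch of the equivalent programmatic construction (the index and alias names are invented for illustration):

    // JSON fields "index"/"indices" and "alias"/"aliases" map to these setters;
    // "filter", "routing", "index_routing" and "search_routing" are ADD-only.
    AliasActions action = AliasActions.add()
            .index("logs-2016-09")
            .alias("logs-current")
            .routing("1");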

private final AliasActions.Type type;
private String[] indices;
private String[] aliases = Strings.EMPTY_ARRAY;
private AliasAction aliasAction;
private String filter;
private String routing;
private String indexRouting;
private String searchRouting;

public AliasActions(AliasAction.Type type, String[] indices, String[] aliases) {
aliasAction = new AliasAction(type);
indices(indices);
aliases(aliases);
AliasActions(AliasActions.Type type) {
this.type = type;
}

public AliasActions(AliasAction.Type type, String index, String alias) {
aliasAction = new AliasAction(type);
indices(index);
aliases(alias);
/**
* Read from a stream.
*/
public AliasActions(StreamInput in) throws IOException {
type = AliasActions.Type.fromValue(in.readByte());
indices = in.readStringArray();
aliases = in.readStringArray();
filter = in.readOptionalString();
routing = in.readOptionalString();
searchRouting = in.readOptionalString();
indexRouting = in.readOptionalString();
}

AliasActions(AliasAction.Type type, String[] index, String alias) {
aliasAction = new AliasAction(type);
indices(index);
aliases(alias);
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte(type.value());
out.writeStringArray(indices);
out.writeStringArray(aliases);
out.writeOptionalString(filter);
out.writeOptionalString(routing);
out.writeOptionalString(searchRouting);
out.writeOptionalString(indexRouting);
}

public AliasActions(AliasAction action) {
this.aliasAction = action;
indices(action.index());
aliases(action.alias());
/**
* Validate that the action is sane. Called when the action is added to the request because actions can be invalid while being
* built.
*/
void validate() {
if (indices == null) {
throw new IllegalArgumentException("One of [index] or [indices] is required");
}
if (type != AliasActions.Type.REMOVE_INDEX && (aliases == null || aliases.length == 0)) {
throw new IllegalArgumentException("One of [alias] or [aliases] is required");
}
}
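
Note the asymmetry validate() enforces: an index is always required, while aliases are required for everything except REMOVE_INDEX. A hedged sketch (validate() is package-private and is normally invoked for you by addAliasAction; names invented):

    AliasActions ok = AliasActions.removeIndex().index("logs-2016-01");  // passes: REMOVE_INDEX needs no alias
    AliasActions bad = AliasActions.remove().index("logs-2016-01");
    // adding `bad` to a request would fail: "One of [alias] or [aliases] is required"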

public AliasActions(Type type, String index, String[] aliases) {
aliasAction = new AliasAction(type);
indices(index);
aliases(aliases);
}

public AliasActions() {
}

public AliasActions filter(Map<String, Object> filter) {
aliasAction.filter(filter);
return this;
}

public AliasActions filter(QueryBuilder filter) {
aliasAction.filter(filter);
return this;
}

public Type actionType() {
return aliasAction.actionType();
}

public void routing(String routing) {
aliasAction.routing(routing);
}

public void searchRouting(String searchRouting) {
aliasAction.searchRouting(searchRouting);
}

public void indexRouting(String indexRouting) {
aliasAction.indexRouting(indexRouting);
}

public AliasActions filter(String filter) {
aliasAction.filter(filter);
return this;
/**
* Type of the action to perform.
*/
public AliasActions.Type actionType() {
return type;
}

@Override
public AliasActions indices(String... indices) {
if (indices == null || indices.length == 0) {
throw new IllegalArgumentException("[indices] can't be empty");
}
for (String index : indices) {
if (false == Strings.hasLength(index)) {
throw new IllegalArgumentException("[indices] can't contain empty string");
}
}
this.indices = indices;
return this;
}

/**
* Set the index this action is operating on.
*/
public AliasActions index(String index) {
if (false == Strings.hasLength(index)) {
throw new IllegalArgumentException("[index] can't be empty string");
}
this.indices = new String[] {index};
return this;
}

/**
* Aliases to use with this action.
*/
@Override
public AliasActions aliases(String... aliases) {
if (type == AliasActions.Type.REMOVE_INDEX) {
throw new IllegalArgumentException("[aliases] is unsupported for [" + type + "]");
}
if (aliases == null || aliases.length == 0) {
throw new IllegalArgumentException("[aliases] can't be empty");
}
for (String alias : aliases) {
if (false == Strings.hasLength(alias)) {
throw new IllegalArgumentException("[aliases] can't contain empty string");
}
}
this.aliases = aliases;
return this;
}

/**
* Set the alias this action is operating on.
*/
public AliasActions alias(String alias) {
if (type == AliasActions.Type.REMOVE_INDEX) {
throw new IllegalArgumentException("[alias] is unsupported for [" + type + "]");
}
if (false == Strings.hasLength(alias)) {
throw new IllegalArgumentException("[alias] can't be empty string");
}
this.aliases = new String[] {alias};
return this;
}

/**
* Set the default routing.
*/
public AliasActions routing(String routing) {
if (type != AliasActions.Type.ADD) {
throw new IllegalArgumentException("[routing] is unsupported for [" + type + "]");
}
this.routing = routing;
return this;
}

public String searchRouting() {
return searchRouting == null ? routing : searchRouting;
}

public AliasActions searchRouting(String searchRouting) {
if (type != AliasActions.Type.ADD) {
throw new IllegalArgumentException("[search_routing] is unsupported for [" + type + "]");
}
this.searchRouting = searchRouting;
return this;
}

public String indexRouting() {
return indexRouting == null ? routing : indexRouting;
}

public AliasActions indexRouting(String indexRouting) {
if (type != AliasActions.Type.ADD) {
throw new IllegalArgumentException("[index_routing] is unsupported for [" + type + "]");
}
this.indexRouting = indexRouting;
return this;
}

public String filter() {
return filter;
}

public AliasActions filter(String filter) {
if (type != AliasActions.Type.ADD) {
throw new IllegalArgumentException("[filter] is unsupported for [" + type + "]");
}
this.filter = filter;
return this;
}

public AliasActions filter(Map<String, Object> filter) {
if (filter == null || filter.isEmpty()) {
this.filter = null;
return this;
}
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.map(filter);
this.filter = builder.string();
return this;
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + filter + "]", e);
}
}

public AliasActions filter(QueryBuilder filter) {
if (filter == null) {
this.filter = null;
return this;
}
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
filter.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.close();
this.filter = builder.string();
return this;
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to build json for alias request", e);
}
}
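
Putting the setters together, a typical filtered-alias construction might look like the following sketch (names invented; QueryBuilders is the standard Elasticsearch query helper):

    AliasActions filtered = AliasActions.add()
            .index("logs-2016-09")
            .alias("errors-current")
            .filter(QueryBuilders.termQuery("level", "error"))  // serialized to a JSON string internally
            .searchRouting("1");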

@Override
public String[] aliases() {
return aliases;
@ -157,7 +393,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
@Override
public boolean expandAliasesWildcards() {
//remove operations support wildcards among aliases, add operations don't
return aliasAction.actionType() == Type.REMOVE;
return type == Type.REMOVE;
}

@Override
@ -170,10 +406,6 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
return INDICES_OPTIONS;
}

public AliasAction aliasAction() {
return aliasAction;
}

public String[] concreteAliases(MetaData metaData, String concreteIndex) {
if (expandAliasesWildcards()) {
//for DELETE we expand the aliases
@ -191,83 +423,48 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
return aliases;
}
}
public AliasActions readFrom(StreamInput in) throws IOException {
indices = in.readStringArray();
aliases = in.readStringArray();
aliasAction = readAliasAction(in);
return this;

@Override
public String toString() {
return "AliasActions["
+ "type=" + type
+ ",indices=" + Arrays.toString(indices)
+ ",aliases=" + Arrays.deepToString(aliases)
+ ",filter=" + filter
+ ",routing=" + routing
+ ",indexRouting=" + indexRouting
+ ",searchRouting=" + searchRouting
+ "]";
}

public void writeTo(StreamOutput out) throws IOException {
out.writeStringArray(indices);
out.writeStringArray(aliases);
this.aliasAction.writeTo(out);
// equals, and hashCode implemented for easy testing of round trip
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass()) {
return false;
}
AliasActions other = (AliasActions) obj;
return Objects.equals(type, other.type)
&& Arrays.equals(indices, other.indices)
&& Arrays.equals(aliases, other.aliases)
&& Objects.equals(filter, other.filter)
&& Objects.equals(routing, other.routing)
&& Objects.equals(indexRouting, other.indexRouting)
&& Objects.equals(searchRouting, other.searchRouting);
}

@Override
public int hashCode() {
return Objects.hash(type, indices, aliases, filter, routing, indexRouting, searchRouting);
}
}
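
As the comment above notes, equals and hashCode exist to make round-trip testing easy. A hedged sketch of such a test (BytesStreamOutput is the usual in-memory StreamOutput in this code base; assume `original` is any populated AliasActions):

    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out);
    AliasActions copy = new AliasActions(out.bytes().streamInput());  // the "read from a stream" constructor
    assert copy.equals(original) && copy.hashCode() == original.hashCode();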

/**
* Adds an alias to the index.
* @param alias The alias
* @param indices The indices
* Add the action to this request and validate it.
*/
public IndicesAliasesRequest addAlias(String alias, String... indices) {
addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias));
return this;
}

public void addAliasAction(AliasActions aliasAction) {
public IndicesAliasesRequest addAliasAction(AliasActions aliasAction) {
aliasAction.validate();
allAliasActions.add(aliasAction);
}

public IndicesAliasesRequest addAliasAction(AliasAction action) {
addAliasAction(new AliasActions(action));
return this;
}

/**
* Adds an alias to the index.
* @param alias The alias
* @param filter The filter
* @param indices The indices
*/
public IndicesAliasesRequest addAlias(String alias, Map<String, Object> filter, String... indices) {
addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filter));
return this;
}

/**
* Adds an alias to the index.
* @param alias The alias
* @param filterBuilder The filter
* @param indices The indices
*/
public IndicesAliasesRequest addAlias(String alias, QueryBuilder filterBuilder, String... indices) {
addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filterBuilder));
return this;
}

/**
* Removes an alias from the index.
*
* @param indices The indices
* @param aliases The aliases
*/
public IndicesAliasesRequest removeAlias(String[] indices, String... aliases) {
addAliasAction(new AliasActions(AliasAction.Type.REMOVE, indices, aliases));
return this;
}

/**
* Removes an alias from the index.
*
* @param index The index
* @param aliases The aliases
*/
public IndicesAliasesRequest removeAlias(String index, String... aliases) {
addAliasAction(new AliasActions(AliasAction.Type.REMOVE, index, aliases));
return this;
}

@ -285,50 +482,20 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
if (allAliasActions.isEmpty()) {
return addValidationError("Must specify at least one alias action", validationException);
}
for (AliasActions aliasAction : allAliasActions) {
if (CollectionUtils.isEmpty(aliasAction.aliases)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: Property [alias/aliases] is either missing or null", validationException);
} else {
for (String alias : aliasAction.aliases) {
if (!Strings.hasText(alias)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: [alias/aliases] may not be empty string", validationException);
}
}
}
if (CollectionUtils.isEmpty(aliasAction.indices)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: Property [index/indices] is either missing or null", validationException);
} else {
for (String index : aliasAction.indices) {
if (!Strings.hasText(index)) {
validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
+ "]: [index/indices] may not be empty string", validationException);
}
}
}
}
return validationException;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
for (int i = 0; i < size; i++) {
allAliasActions.add(readAliasActions(in));
}
allAliasActions = in.readList(AliasActions::new);
readTimeout(in);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(allAliasActions.size());
for (AliasActions aliasAction : allAliasActions) {
aliasAction.writeTo(out);
}
out.writeList(allAliasActions);
writeTimeout(out);
}

@ -336,11 +503,6 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
return INDICES_OPTIONS;
}

private static AliasActions readAliasActions(StreamInput in) throws IOException {
AliasActions actions = new AliasActions();
return actions.readFrom(in);
}

@Override
public List<? extends IndicesRequest> subRequests() {
return allAliasActions;
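
Taken together, the request now carries a list of self-describing AliasActions entries rather than raw AliasAction instances. A minimal end-to-end sketch of building a request with the new API (index and alias names invented):

    IndicesAliasesRequest request = new IndicesAliasesRequest();
    request.addAliasAction(AliasActions.add().index("logs-2016-09").alias("logs-current"));
    request.addAliasAction(AliasActions.remove().index("logs-2016-08").alias("logs-current"));
    request.addAliasAction(AliasActions.removeIndex().index("logs-2016-01"));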
@ -22,15 +22,15 @@ package org.elasticsearch.action.admin.indices.alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.index.query.QueryBuilder;

import java.util.Map;

/**
*
* Builder for request to modify many aliases at once.
*/
public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<IndicesAliasesRequest, IndicesAliasesResponse, IndicesAliasesRequestBuilder> {
public class IndicesAliasesRequestBuilder
extends AcknowledgedRequestBuilder<IndicesAliasesRequest, IndicesAliasesResponse, IndicesAliasesRequestBuilder> {

public IndicesAliasesRequestBuilder(ElasticsearchClient client, IndicesAliasesAction action) {
super(client, action, new IndicesAliasesRequest());
@ -43,7 +43,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param alias The alias
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias) {
request.addAlias(alias, index);
request.addAliasAction(AliasActions.add().index(index).alias(alias));
return this;
}

@ -54,7 +54,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param alias The alias
*/
public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias) {
request.addAlias(alias, indices);
request.addAliasAction(AliasActions.add().indices(indices).alias(alias));
return this;
}

@ -66,8 +66,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias, String filter) {
AliasActions action = new AliasActions(AliasAction.Type.ADD, index, alias).filter(filter);
request.addAliasAction(action);
request.addAliasAction(AliasActions.add().index(index).alias(alias).filter(filter));
return this;
}

@ -79,8 +78,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String indices[], String alias, String filter) {
AliasActions action = new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filter);
request.addAliasAction(action);
request.addAliasAction(AliasActions.add().indices(indices).alias(alias).filter(filter));
return this;
}

@ -92,7 +90,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String[] indices, String alias, Map<String, Object> filter) {
request.addAlias(alias, filter, indices);
request.addAliasAction(AliasActions.add().indices(indices).alias(alias).filter(filter));
return this;
}

@ -104,7 +102,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filter The filter
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias, Map<String, Object> filter) {
request.addAlias(alias, filter, index);
request.addAliasAction(AliasActions.add().index(index).alias(alias).filter(filter));
return this;
}

@ -116,7 +114,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filterBuilder The filter
*/
public IndicesAliasesRequestBuilder addAlias(String indices[], String alias, QueryBuilder filterBuilder) {
request.addAlias(alias, filterBuilder, indices);
request.addAliasAction(AliasActions.add().indices(indices).alias(alias).filter(filterBuilder));
return this;
}

@ -128,7 +126,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param filterBuilder The filter
*/
public IndicesAliasesRequestBuilder addAlias(String index, String alias, QueryBuilder filterBuilder) {
request.addAlias(alias, filterBuilder, index);
request.addAliasAction(AliasActions.add().index(index).alias(alias).filter(filterBuilder));
return this;
}

@ -139,7 +137,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param alias The alias
*/
public IndicesAliasesRequestBuilder removeAlias(String index, String alias) {
request.removeAlias(index, alias);
request.addAliasAction(AliasActions.remove().index(index).alias(alias));
return this;
}

@ -150,7 +148,7 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param aliases The aliases
*/
public IndicesAliasesRequestBuilder removeAlias(String[] indices, String... aliases) {
request.removeAlias(indices, aliases);
request.addAliasAction(AliasActions.remove().indices(indices).aliases(aliases));
return this;
}

@ -161,17 +159,12 @@ public class IndicesAliasesRequestBuilder extends AcknowledgedRequestBuilder<Ind
* @param aliases The aliases
*/
public IndicesAliasesRequestBuilder removeAlias(String index, String[] aliases) {
request.removeAlias(index, aliases);
request.addAliasAction(AliasActions.remove().index(index).aliases(aliases));
return this;
}

/**
* Adds an alias action to the request.
*
* @param aliasAction The alias action
*/
public IndicesAliasesRequestBuilder addAliasAction(AliasAction aliasAction) {
request.addAliasAction(aliasAction);
public IndicesAliasesRequestBuilder removeIndex(String index) {
request.addAliasAction(AliasActions.removeIndex().index(index));
return this;
}

@ -43,6 +43,8 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;

import static java.util.Collections.unmodifiableList;

/**
* Add/remove aliases action
*/
@ -86,31 +88,38 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction<Ind
//Expand the indices names
List<AliasActions> actions = request.aliasActions();
List<AliasAction> finalActions = new ArrayList<>();
boolean hasOnlyDeletesButNoneCanBeDone = true;

// Resolve all the AliasActions into AliasAction instances and gather all the aliases
Set<String> aliases = new HashSet<>();
for (AliasActions action : actions) {
//expand indices
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), action.indices());
//collect the aliases
Collections.addAll(aliases, action.aliases());
for (String index : concreteIndices) {
for (String alias : action.concreteAliases(state.metaData(), index)) {
AliasAction finalAction = new AliasAction(action.aliasAction());
finalAction.index(index);
finalAction.alias(alias);
finalActions.add(finalAction);
//if there is only delete requests, none will be added if the types do not map to any existing type
hasOnlyDeletesButNoneCanBeDone = false;
switch (action.actionType()) {
case ADD:
for (String alias : action.concreteAliases(state.metaData(), index)) {
finalActions.add(new AliasAction.Add(index, alias, action.filter(), action.indexRouting(), action.searchRouting()));
}
break;
case REMOVE:
for (String alias : action.concreteAliases(state.metaData(), index)) {
finalActions.add(new AliasAction.Remove(index, alias));
}
break;
case REMOVE_INDEX:
finalActions.add(new AliasAction.RemoveIndex(index));
break;
default:
throw new IllegalArgumentException("Unsupported action [" + action.actionType() + "]");
}
}
}
if (hasOnlyDeletesButNoneCanBeDone && actions.size() != 0) {
if (finalActions.isEmpty() && false == actions.isEmpty()) {
throw new AliasesNotFoundException(aliases.toArray(new String[aliases.size()]));
}
request.aliasActions().clear();
IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.actions(finalActions.toArray(new AliasAction[finalActions.size()]));
IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(unmodifiableList(finalActions))
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout());

indexAliasesService.indicesAliases(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
@Override
@ -292,7 +292,7 @@ public class DetailAnalyzeResponse implements Streamable, ToXContent {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields.NAME, name);
builder.field(Fields.FILTERED_TEXT, texts);
builder.array(Fields.FILTERED_TEXT, texts);
builder.endObject();
return builder;
}
@ -45,9 +45,9 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
@ -145,45 +145,46 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
}
}
final AnalysisRegistry analysisRegistry = indicesService.getAnalysis();
return analyze(request, field, analyzer, indexService != null ? indexService.analysisService() : null, analysisRegistry, environment);
return analyze(request, field, analyzer, indexService != null ? indexService.getIndexAnalyzers() : null, analysisRegistry, environment);
} catch (IOException e) {
throw new ElasticsearchException("analysis failed", e);
}

}

public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, AnalysisService analysisService, AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, IndexAnalyzers indexAnalyzers, AnalysisRegistry analysisRegistry, Environment environment) throws IOException {

boolean closeAnalyzer = false;
if (analyzer == null && request.analyzer() != null) {
if (analysisService == null) {
if (indexAnalyzers == null) {
analyzer = analysisRegistry.getAnalyzer(request.analyzer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find global analyzer [" + request.analyzer() + "]");
}
} else {
analyzer = analysisService.analyzer(request.analyzer());
analyzer = indexAnalyzers.get(request.analyzer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
}
}

} else if (request.tokenizer() != null) {
TokenizerFactory tokenizerFactory = parseTokenizerFactory(request, analysisService, analysisRegistry, environment);
final IndexSettings indexSettings = indexAnalyzers == null ? null : indexAnalyzers.getIndexSettings();
TokenizerFactory tokenizerFactory = parseTokenizerFactory(request, indexAnalyzers, analysisRegistry, environment);

TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
tokenFilterFactories = getTokenFilterFactories(request, analysisService, analysisRegistry, environment, tokenFilterFactories);
tokenFilterFactories = getTokenFilterFactories(request, indexSettings, analysisRegistry, environment, tokenFilterFactories);

CharFilterFactory[] charFilterFactories = new CharFilterFactory[0];
charFilterFactories = getCharFilterFactories(request, analysisService, analysisRegistry, environment, charFilterFactories);
charFilterFactories = getCharFilterFactories(request, indexSettings, analysisRegistry, environment, charFilterFactories);

analyzer = new CustomAnalyzer(tokenizerFactory, charFilterFactories, tokenFilterFactories);
closeAnalyzer = true;
} else if (analyzer == null) {
if (analysisService == null) {
if (indexAnalyzers == null) {
analyzer = analysisRegistry.getAnalyzer("standard");
} else {
analyzer = analysisService.defaultIndexAnalyzer();
analyzer = indexAnalyzers.getDefaultIndexAnalyzer();
}
}
if (analyzer == null) {
@ -446,7 +447,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return extendedAttributes;
}

private static CharFilterFactory[] getCharFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
private static CharFilterFactory[] getCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, CharFilterFactory[] charFilterFactories) throws IOException {
if (request.charFilters() != null && request.charFilters().size() > 0) {
charFilterFactories = new CharFilterFactory[request.charFilters().size()];
@ -468,19 +469,19 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
charFilterFactories[i] = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter_[" + i + "]", settings);
} else {
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory;
if (analysisService == null) {
if (indexSettings == null) {
charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilter.name + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilter.name);
} else {
charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name, analysisService.getIndexSettings());
charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name, indexSettings);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(analysisService.getIndexSettings(), environment, charFilter.name,
AnalysisRegistry.getSettingsFromIndexSettings(analysisService.getIndexSettings(),
charFilterFactories[i] = charFilterFactoryFactory.get(indexSettings, environment, charFilter.name,
AnalysisRegistry.getSettingsFromIndexSettings(indexSettings,
AnalysisRegistry.INDEX_ANALYSIS_CHAR_FILTER + "." + charFilter.name));
}
}
@ -492,7 +493,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return charFilterFactories;
}

private static TokenFilterFactory[] getTokenFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
private static TokenFilterFactory[] getTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry,
Environment environment, TokenFilterFactory[] tokenFilterFactories) throws IOException {
if (request.tokenFilters() != null && request.tokenFilters().size() > 0) {
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().size()];
@ -514,19 +515,19 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter_[" + i + "]", settings);
} else {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory;
if (analysisService == null) {
if (indexSettings == null) {
tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilter.name + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilter.name);
} else {
tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name, analysisService.getIndexSettings());
tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name, indexSettings);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilter.name + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(analysisService.getIndexSettings(), environment, tokenFilter.name,
AnalysisRegistry.getSettingsFromIndexSettings(analysisService.getIndexSettings(),
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(indexSettings, environment, tokenFilter.name,
AnalysisRegistry.getSettingsFromIndexSettings(indexSettings,
AnalysisRegistry.INDEX_ANALYSIS_FILTER + "." + tokenFilter.name));
}
}
@ -538,7 +539,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
return tokenFilterFactories;
}

private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, AnalysisService analysisService,
private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, IndexAnalyzers indexAnalyzers,
AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
TokenizerFactory tokenizerFactory;
final AnalyzeRequest.NameOrDefinition tokenizer = request.tokenizer();
@ -558,19 +559,19 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
tokenizerFactory = tokenizerFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenizer", settings);
} else {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory;
if (analysisService == null) {
if (indexAnalyzers == null) {
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name);
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizer.name + "]");
}
tokenizerFactory = tokenizerFactoryFactory.get(environment, tokenizer.name);
} else {
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name, analysisService.getIndexSettings());
tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name, indexAnalyzers.getIndexSettings());
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find tokenizer under [" + tokenizer.name + "]");
}
tokenizerFactory = tokenizerFactoryFactory.get(analysisService.getIndexSettings(), environment, tokenizer.name,
AnalysisRegistry.getSettingsFromIndexSettings(analysisService.getIndexSettings(),
tokenizerFactory = tokenizerFactoryFactory.get(indexAnalyzers.getIndexSettings(), environment, tokenizer.name,
AnalysisRegistry.getSettingsFromIndexSettings(indexAnalyzers.getIndexSettings(),
AnalysisRegistry.INDEX_ANALYSIS_TOKENIZER + "." + tokenizer.name));
}
}
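
The pattern repeated throughout this file: AnalysisService is replaced by IndexAnalyzers (or plain IndexSettings for the factory lookups), with null signalling "no index context, fall back to the global AnalysisRegistry". Reduced to its core, the branch looks like this sketch (using names from the diff):

    Analyzer analyzer;
    if (indexAnalyzers == null) {
        analyzer = analysisRegistry.getAnalyzer(request.analyzer());  // global, cluster-wide lookup
    } else {
        analyzer = indexAnalyzers.get(request.analyzer());            // per-index lookup
    }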
@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.indices.close;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@ -108,7 +110,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn

@Override
public void onFailure(Exception t) {
logger.debug("failed to close indices [{}]", t, (Object)concreteIndices);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
listener.onFailure(t);
}
});
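
This and the delete-index change below migrate the logging calls to the Log4j 2 idiom: the Throwable moves to the last argument and the message is built lazily through a Supplier, so the ParameterizedMessage is only constructed when debug logging is enabled. The shape, isolated:

    // old ES-custom style: logger.debug(String, Throwable, Object...)
    // new Log4j 2 style: lazy message, Throwable last
    logger.debug((Supplier<?>) () -> new ParameterizedMessage(
            "failed to close indices [{}]", (Object) concreteIndices), t);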
@ -41,6 +41,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private final TransportMessage originalMessage;
private final String cause;
private final String index;
private final String providedName;
private final boolean updateAllTypes;
private Index shrinkFrom;

@ -59,11 +60,13 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;


public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) {
public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName,
boolean updateAllTypes) {
this.originalMessage = originalMessage;
this.cause = cause;
this.index = index;
this.updateAllTypes = updateAllTypes;
this.providedName = providedName;
}

public CreateIndexClusterStateUpdateRequest settings(Settings settings) {
@ -151,6 +154,14 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
return updateAllTypes;
}

/**
* The name that was provided by the user. This might contain a date math expression.
* @see IndexMetaData#SETTING_INDEX_PROVIDED_NAME
*/
public String getProvidedName() {
return providedName;
}

public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}

@ -72,7 +72,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
}

final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index());
final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.updateAllTypes())
final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index(), request.updateAllTypes())
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.settings(request.settings()).mappings(request.mappings())
.aliases(request.aliases()).customs(request.customs())
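
The extra constructor argument exists because the user-supplied name can be a date math expression that differs from the resolved concrete name. A hedged illustration (the concrete date is invented):

    // request.index() -> "<logs-{now/d}>"    (providedName: kept verbatim, may contain date math)
    // indexName       -> "logs-2016.09.20"   (what resolveDateMathExpression(...) returns)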
@ -19,6 +19,8 @@

package org.elasticsearch.action.admin.indices.delete;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@ -100,7 +102,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete

@Override
public void onFailure(Exception t) {
logger.debug("failed to delete indices [{}]", t, concreteIndices);
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
listener.onFailure(t);
}
});

@ -40,7 +40,7 @@ import java.io.IOException;
public class FlushRequest extends BroadcastRequest<FlushRequest> {

private boolean force = false;
private boolean waitIfOngoing = false;
private boolean waitIfOngoing = true;

/**
* Constructs a new flush request against one or more indices. If nothing is provided, all indices will
@ -61,6 +61,7 @@ public class FlushRequest extends BroadcastRequest<FlushRequest> {
/**
* if set to <tt>true</tt> the flush will block
* if another flush operation is already running until the flush can be performed.
* The default is <code>true</code>
*/
public FlushRequest waitIfOngoing(boolean waitIfOngoing) {
this.waitIfOngoing = waitIfOngoing;
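
With the default flipped to true, a bare flush now waits for any in-flight flush instead of failing fast. A short usage sketch (index name invented):

    FlushRequest flush = new FlushRequest("logs-2016-09");  // waitIfOngoing now defaults to true
    flush.waitIfOngoing(false);                             // restore the old fail-fast behavior if needed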