Merge remote-tracking branch 'upstream/master' into index-lifecycle

commit 62ac2fa5ec
@@ -77,39 +77,24 @@ Run a single test case (variants)
 ./gradlew test "-Dtests.class=*.ClassName"
 ----------------------------------------------------------

-Run all tests in a package and sub-packages
+Run all tests in a package and its sub-packages

 ----------------------------------------------------
 ./gradlew test "-Dtests.class=org.elasticsearch.package.*"
 ----------------------------------------------------

-Run any test methods that contain 'esi' (like: ...r*esi*ze...).
+Run any test methods that contain 'esi' (like: ...r*esi*ze...)

 -------------------------------
 ./gradlew test "-Dtests.method=*esi*"
 -------------------------------

-You can also filter tests by certain annotations ie:
-
-* `@Nightly` - tests that only run in nightly builds (disabled by default)
-* `@Backwards` - backwards compatibility tests (disabled by default)
-* `@AwaitsFix` - tests that are waiting for a bugfix (disabled by default)
-* `@BadApple` - tests that are known to fail randomly (disabled by default)
-
-Those annotation names can be combined into a filter expression like:
+Run all tests that are waiting for a bugfix (disabled by default)

 ------------------------------------------------
-./gradlew test -Dtests.filter="@nightly and not @backwards"
+./gradlew test -Dtests.filter=@awaitsfix
 ------------------------------------------------

-to run all nightly test but not the ones that are backwards tests. `tests.filter` supports
-the boolean operators `and, or, not` and grouping ie:
-
-
----------------------------------------------------------------
-./gradlew test -Dtests.filter="@nightly and not(@badapple or @backwards)"
----------------------------------------------------------------

 === Seed and repetitions.

 Run with a given seed (seed is a hex-encoded long).

@@ -160,8 +145,6 @@ Test groups can be enabled or disabled (true/false).
 Default value provided below in [brackets].

 ------------------------------------------------------------------
 ./gradlew test -Dtests.nightly=[false] - nightly test group (@Nightly)
 ./gradlew test -Dtests.weekly=[false] - weekly tests (@Weekly)
 ./gradlew test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix)
 ------------------------------------------------------------------
@@ -106,6 +106,7 @@ dependencies {
   compile 'org.apache.rat:apache-rat:0.11'
   compile "org.elasticsearch:jna:4.5.1"
   compile 'com.github.jengelman.gradle.plugins:shadow:2.0.4'
+  compile 'de.thetaphi:forbiddenapis:2.6'
   testCompile "junit:junit:${props.getProperty('junit')}"
 }
@@ -19,7 +19,10 @@
 package org.elasticsearch.gradle.precommit

 import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
+import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
+import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
 import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask
+import org.gradle.api.JavaVersion
 import org.gradle.api.Project
 import org.gradle.api.Task
 import org.gradle.api.plugins.JavaBasePlugin
@@ -33,7 +36,7 @@ class PrecommitTasks {
     public static Task create(Project project, boolean includeDependencyLicenses) {
         project.configurations.create("forbiddenApisCliJar")
         project.dependencies {
-            forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.5')
+            forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.6')
         }

         List<Task> precommitTasks = [
@@ -109,15 +112,13 @@ class PrecommitTasks {
     }

     private static Task configureForbiddenApisCli(Project project) {
-        Task forbiddenApisCli = project.tasks.create('forbiddenApis')
-        project.sourceSets.all { sourceSet ->
-            forbiddenApisCli.dependsOn(
-                project.tasks.create(sourceSet.getTaskName('forbiddenApis', null), ForbiddenApisCliTask) {
+        project.pluginManager.apply(ForbiddenApisPlugin)
+        ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources')
+        project.tasks.withType(CheckForbiddenApis) {
             dependsOn(buildResources)
-            it.sourceSet = sourceSet
-            javaHome = project.runtimeJavaHome
-            targetCompatibility = project.compilerJavaVersion
+            targetCompatibility = project.runtimeJavaVersion >= JavaVersion.VERSION_1_9 ?
+                project.runtimeJavaVersion.getMajorVersion() :
+                project.runtimeJavaVersion
             bundledSignatures = [
                 "jdk-unsafe", "jdk-deprecated", "jdk-non-portable", "jdk-system-out"
             ]
@@ -126,7 +127,7 @@ class PrecommitTasks {
                 buildResources.copy("forbidden/es-all-signatures.txt")
             )
             suppressAnnotations = ['**.SuppressForbidden']
-            if (sourceSet.name == 'test') {
+            if (name.endsWith('Test')) {
                 signaturesFiles += project.files(
                     buildResources.copy("forbidden/es-test-signatures.txt"),
                     buildResources.copy("forbidden/http-signatures.txt")
@@ -134,8 +135,6 @@ class PrecommitTasks {
             } else {
                 signaturesFiles += project.files(buildResources.copy("forbidden/es-server-signatures.txt"))
             }
-            dependsOn sourceSet.classesTaskName
-            classesDirs = sourceSet.output.classesDirs
             ext.replaceSignatureFiles = { String... names ->
                 signaturesFiles = project.files(
                     names.collect { buildResources.copy("forbidden/${it}.txt") }
@@ -147,9 +146,9 @@ class PrecommitTasks {
                 )
             }
         }
-            )
-        }
-        return forbiddenApisCli
+        Task forbiddenApis = project.tasks.getByName("forbiddenApis")
+        forbiddenApis.group = ""
+        return forbiddenApis
     }

     private static Task configureCheckstyle(Project project) {
@@ -1,177 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.gradle.precommit;
-
-import org.elasticsearch.gradle.LoggedExec;
-import org.gradle.api.JavaVersion;
-import org.gradle.api.artifacts.Configuration;
-import org.gradle.api.file.FileCollection;
-import org.gradle.api.logging.Logger;
-import org.gradle.api.logging.Logging;
-import org.gradle.api.tasks.Input;
-import org.gradle.api.tasks.InputFiles;
-import org.gradle.api.tasks.SkipWhenEmpty;
-import org.gradle.api.tasks.SourceSet;
-import org.gradle.api.tasks.TaskAction;
-import org.gradle.process.JavaExecSpec;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Set;
-
-public class ForbiddenApisCliTask extends PrecommitTask {
-
-    private final Logger logger = Logging.getLogger(ForbiddenApisCliTask.class);
-    private FileCollection signaturesFiles;
-    private List<String> signatures = new ArrayList<>();
-    private Set<String> bundledSignatures = new LinkedHashSet<>();
-    private Set<String> suppressAnnotations = new LinkedHashSet<>();
-    private JavaVersion targetCompatibility;
-    private FileCollection classesDirs;
-    private SourceSet sourceSet;
-    // This needs to be an object so it can hold Groovy GStrings
-    private Object javaHome;
-
-    @Input
-    public JavaVersion getTargetCompatibility() {
-        return targetCompatibility;
-    }
-
-    public void setTargetCompatibility(JavaVersion targetCompatibility) {
-        if (targetCompatibility.compareTo(JavaVersion.VERSION_1_10) > 0) {
-            logger.warn(
-                "Target compatibility is set to {} but forbiddenapis only supports up to 10. Will cap at 10.",
-                targetCompatibility
-            );
-            this.targetCompatibility = JavaVersion.VERSION_1_10;
-        } else {
-            this.targetCompatibility = targetCompatibility;
-        }
-    }
-
-    @InputFiles
-    @SkipWhenEmpty
-    public FileCollection getClassesDirs() {
-        return classesDirs.filter(File::exists);
-    }
-
-    public void setClassesDirs(FileCollection classesDirs) {
-        this.classesDirs = classesDirs;
-    }
-
-    @InputFiles
-    public FileCollection getSignaturesFiles() {
-        return signaturesFiles;
-    }
-
-    public void setSignaturesFiles(FileCollection signaturesFiles) {
-        this.signaturesFiles = signaturesFiles;
-    }
-
-    @Input
-    public List<String> getSignatures() {
-        return signatures;
-    }
-
-    public void setSignatures(List<String> signatures) {
-        this.signatures = signatures;
-    }
-
-    @Input
-    public Set<String> getBundledSignatures() {
-        return bundledSignatures;
-    }
-
-    public void setBundledSignatures(Set<String> bundledSignatures) {
-        this.bundledSignatures = bundledSignatures;
-    }
-
-    @Input
-    public Set<String> getSuppressAnnotations() {
-        return suppressAnnotations;
-    }
-
-    public void setSuppressAnnotations(Set<String> suppressAnnotations) {
-        this.suppressAnnotations = suppressAnnotations;
-    }
-
-    @InputFiles
-    public FileCollection getClassPathFromSourceSet() {
-        return getProject().files(
-            sourceSet.getCompileClasspath(),
-            sourceSet.getRuntimeClasspath()
-        );
-    }
-
-    public void setSourceSet(SourceSet sourceSet) {
-        this.sourceSet = sourceSet;
-    }
-
-    @InputFiles
-    public Configuration getForbiddenAPIsConfiguration() {
-        return getProject().getConfigurations().getByName("forbiddenApisCliJar");
-    }
-
-    @Input
-    public Object getJavaHome() {
-        return javaHome;
-    }
-
-    public void setJavaHome(Object javaHome) {
-        this.javaHome = javaHome;
-    }
-
-    @TaskAction
-    public void runForbiddenApisAndWriteMarker() {
-        LoggedExec.javaexec(getProject(), (JavaExecSpec spec) -> {
-            spec.classpath(
-                getForbiddenAPIsConfiguration(),
-                getClassPathFromSourceSet()
-            );
-            spec.setExecutable(getJavaHome() + "/bin/java");
-            spec.setMain("de.thetaphi.forbiddenapis.cli.CliMain");
-            // build the command line
-            getSignaturesFiles().forEach(file -> spec.args("-f", file.getAbsolutePath()));
-            getSuppressAnnotations().forEach(annotation -> spec.args("--suppressannotation", annotation));
-            getBundledSignatures().forEach(bundled -> {
-                // there's no option for target compatibility so we have to interpret it
-                final String prefix;
-                if (bundled.equals("jdk-system-out") ||
-                    bundled.equals("jdk-reflection") ||
-                    bundled.equals("jdk-non-portable")) {
-                    prefix = "";
-                } else {
-                    prefix = "-" + (
-                        getTargetCompatibility().compareTo(JavaVersion.VERSION_1_9) >= 0 ?
-                            getTargetCompatibility().getMajorVersion() :
-                            "1." + getTargetCompatibility().getMajorVersion()
-                    );
-                }
-                spec.args("-b", bundled + prefix);
-            });
-            getClassesDirs().forEach(dir ->
-                spec.args("-d", dir)
-            );
-        });
-    }
-
-}
@@ -52,7 +52,7 @@ import java.util.stream.IntStream;
 public class ThirdPartyAuditTask extends DefaultTask {

     private static final Pattern MISSING_CLASS_PATTERN = Pattern.compile(
-        "WARNING: The referenced class '(.*)' cannot be loaded\\. Please fix the classpath!"
+        "WARNING: Class '(.*)' cannot be loaded \\(.*\\)\\. Please fix the classpath!"
     );

     private static final Pattern VIOLATION_PATTERN = Pattern.compile(
@@ -1,46 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.gradle.testclusters;
-
-import org.elasticsearch.gradle.Distribution;
-import org.elasticsearch.gradle.Version;
-
-import java.util.concurrent.Future;
-
-public interface ElasticsearchConfiguration {
-    String getName();
-
-    Version getVersion();
-
-    void setVersion(Version version);
-
-    default void setVersion(String version) {
-        setVersion(Version.fromString(version));
-    }
-
-    Distribution getDistribution();
-
-    void setDistribution(Distribution distribution);
-
-    void claim();
-
-    Future<Void> start();
-
-    void unClaimAndStop();
-}
@@ -29,7 +29,7 @@ import java.util.concurrent.Future;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;

-public class ElasticsearchNode implements ElasticsearchConfiguration {
+public class ElasticsearchNode {

     private final String name;
     private final GradleServicesAdapter services;
@@ -45,34 +45,28 @@ public class ElasticsearchNode implements ElasticsearchConfiguration {
         this.services = services;
     }

-    @Override
     public String getName() {
         return name;
     }

-    @Override
     public Version getVersion() {
         return version;
     }

-    @Override
     public void setVersion(Version version) {
         checkNotRunning();
         this.version = version;
     }

-    @Override
     public Distribution getDistribution() {
         return distribution;
     }

-    @Override
     public void setDistribution(Distribution distribution) {
         checkNotRunning();
         this.distribution = distribution;
     }

-    @Override
     public void claim() {
         noOfClaims.incrementAndGet();
     }
@@ -82,7 +76,6 @@ public class ElasticsearchNode implements ElasticsearchConfiguration {
      *
      * @return future of thread running in the background
      */
-    @Override
     public Future<Void> start() {
         if (started.getAndSet(true)) {
             logger.lifecycle("Already started cluster: {}", name);
@@ -95,7 +88,6 @@ public class ElasticsearchNode implements ElasticsearchConfiguration {
     /**
      * Stops a running cluster if it's not claimed. Does nothing otherwise.
      */
-    @Override
     public void unClaimAndStop() {
         int decrementedClaims = noOfClaims.decrementAndGet();
         if (decrementedClaims > 0) {
@@ -46,7 +46,7 @@ public class TestClustersPlugin implements Plugin<Project> {

     @Override
     public void apply(Project project) {
-        NamedDomainObjectContainer<? extends ElasticsearchConfiguration> container = project.container(
+        NamedDomainObjectContainer<? extends ElasticsearchNode> container = project.container(
             ElasticsearchNode.class,
             (name) -> new ElasticsearchNode(name, GradleServicesAdapter.getInstance(project))
         );
@@ -56,12 +56,12 @@ public class TestClustersPlugin implements Plugin<Project> {
         listTask.setGroup("ES cluster formation");
         listTask.setDescription("Lists all ES clusters configured for this project");
         listTask.doLast((Task task) ->
-            container.forEach((ElasticsearchConfiguration cluster) ->
+            container.forEach((ElasticsearchNode cluster) ->
                 logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getDistribution())
             )
         );

-        Map<Task, List<ElasticsearchConfiguration>> taskToCluster = new HashMap<>();
+        Map<Task, List<ElasticsearchNode>> taskToCluster = new HashMap<>();

         // register an extension for all current and future tasks, so that any task can declare that it wants to use a
         // specific cluster.
@@ -70,7 +70,7 @@ public class TestClustersPlugin implements Plugin<Project> {
                 .set(
                     "useCluster",
                     new Closure<Void>(this, this) {
-                        public void doCall(ElasticsearchConfiguration conf) {
+                        public void doCall(ElasticsearchNode conf) {
                             taskToCluster.computeIfAbsent(task, k -> new ArrayList<>()).add(conf);
                         }
                     })
@@ -79,7 +79,7 @@ public class TestClustersPlugin implements Plugin<Project> {
         project.getGradle().getTaskGraph().whenReady(taskExecutionGraph ->
             taskExecutionGraph.getAllTasks()
                 .forEach(task ->
-                    taskToCluster.getOrDefault(task, Collections.emptyList()).forEach(ElasticsearchConfiguration::claim)
+                    taskToCluster.getOrDefault(task, Collections.emptyList()).forEach(ElasticsearchNode::claim)
                 )
         );
         project.getGradle().addListener(
@@ -87,7 +87,7 @@ public class TestClustersPlugin implements Plugin<Project> {
                 @Override
                 public void beforeActions(Task task) {
                     // we only start the cluster before the actions, so we'll not start it if the task is up-to-date
-                    taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::start);
+                    taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchNode::start);
                 }
                 @Override
                 public void afterActions(Task task) {}
@@ -99,7 +99,7 @@ public class TestClustersPlugin implements Plugin<Project> {
                 public void afterExecute(Task task, TaskState state) {
                     // always un-claim the cluster, even if _this_ task is up-to-date, as others might not have been and caused the
                     // cluster to start.
-                    taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::unClaimAndStop);
+                    taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchNode::unClaimAndStop);
                 }
                 @Override
                 public void beforeExecute(Task task) {}
@@ -1,4 +1,4 @@
-import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
+import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis

 /*
  * Licensed to Elasticsearch under one or more contributor
@@ -52,7 +52,7 @@ dependencies {
   testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
 }

-tasks.withType(ForbiddenApisCliTask) {
+tasks.withType(CheckForbiddenApis) {
   //client does not depend on server, so only jdk and http signatures should be checked
   replaceSignatureFiles ('jdk-signatures', 'http-signatures')
 }
@@ -16,10 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-
-
-
-import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
+import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis

 apply plugin: 'elasticsearch.build'
@@ -32,7 +29,7 @@ dependencies {

 archivesBaseName = 'elasticsearch-launchers'

-tasks.withType(ForbiddenApisCliTask) {
+tasks.withType(CheckForbiddenApis) {
   replaceSignatureFiles 'jdk-signatures'
 }
@@ -55,7 +55,11 @@ POST /kimchy/_forcemerge?only_expunge_deletes=false&max_num_segments=100&flush=t
 === Multi Index

 The force merge API can be applied to more than one index with a single call, or
-even on `_all` the indices.
+even on `_all` the indices. Multi index operations are executed one shard at a
+time per node. Force merge makes the storage for the shard being merged
+temporarily increase, up to double its size in case `max_num_segments` is set
+to `1`, as all segments need to be rewritten into a new one.

 [source,js]
 --------------------------------------------------
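Editor's note: the request body of the snippet above is cut off by the diff context. For reference, the multi-index form described in the new prose looks roughly like the following sketch (the index names mirror the single-index example in this file and are illustrative):

[source,js]
--------------------------------------------------
POST /kimchy,elasticsearch/_forcemerge

POST /_forcemerge
--------------------------------------------------
// NOTCONSOLE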
@@ -721,12 +721,30 @@ All processors are defined in the following way within a pipeline definition:
 // NOTCONSOLE

 Each processor defines its own configuration parameters, but all processors have
-the ability to declare `tag` and `on_failure` fields. These fields are optional.
+the ability to declare `tag`, `on_failure` and `if` fields. These fields are optional.

 A `tag` is simply a string identifier of the specific instantiation of a certain
 processor in a pipeline. The `tag` field does not affect the processor's behavior,
 but is very useful for bookkeeping and tracing errors to specific processors.

+The `if` field must contain a script that returns a boolean value. If the script evaluates to `true`
+then the processor will be executed for the given document; otherwise it will be skipped.
+The `if` field takes an object with the script fields defined in <<script-processor, script-options>>
+and accesses a read-only version of the document via the same `ctx` variable used by scripts in the
+<<script-processor>>.
+
+[source,js]
+--------------------------------------------------
+{
+  "set": {
+    "if": "ctx.bar == 'expectedValue'",
+    "field": "foo",
+    "value": "bar"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
 See <<handling-failure-in-pipelines>> to learn more about the `on_failure` field and error handling in pipelines.

 The <<ingest-info,node info API>> can be used to figure out what processors are available in a cluster.
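Editor's note: the hunk above only shows `if` with an inline string condition, but the new paragraph also says `if` accepts an object using the standard script fields. A minimal sketch of that object form (the `params` value is illustrative, assuming the usual `source`/`params` script syntax):

[source,js]
--------------------------------------------------
{
  "set": {
    "if": {
      "source": "ctx.bar == params.expected",
      "params": {
        "expected": "expectedValue"
      }
    },
    "field": "foo",
    "value": "bar"
  }
}
--------------------------------------------------
// NOTCONSOLE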
@@ -40,7 +40,7 @@ GET /_xpack/migration/assistance
 // CONSOLE
 // TEST[skip:cannot create an old index in docs test]

-A successful call returns a list of indices that need to updated or reindexed:
+A successful call returns a list of indices that need to be updated or reindexed:

 [source,js]
 --------------------------------------------------
@@ -73,7 +73,7 @@ GET /_xpack/migration/assistance/my_*
 // CONSOLE
 // TEST[skip:cannot create an old index in docs test]

-A successful call returns a list of indices that needs to updated or reindexed
+A successful call returns a list of indices that need to be updated or reindexed
 and match the index specified on the endpoint:

 [source,js]
@@ -8,8 +8,8 @@

 experimental[]

-This API deletes an existing rollup job. The job can be started or stopped, in both cases it will be deleted. Attempting
-to delete a non-existing job will throw an exception
+This API deletes an existing rollup job. A job must be *stopped* first before it can be deleted. Attempting to delete
+a started job will result in an error. Similarly, attempting to delete a nonexistent job will throw an exception.

 .Deleting the job does not delete rolled up data
 **********************************
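Editor's note: per the corrected text above, a delete now has to be preceded by a stop. Assuming the rollup endpoints of this release and an illustrative job id `sensor`, the sequence is roughly:

[source,js]
--------------------------------------------------
POST _xpack/rollup/job/sensor/_stop

DELETE _xpack/rollup/job/sensor
--------------------------------------------------
// NOTCONSOLE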
@@ -99,12 +99,12 @@ A 404 `resource_not_found` exception will be thrown:
   "root_cause" : [
     {
       "type" : "resource_not_found_exception",
-      "reason" : "the task with id does_not_exist doesn't exist",
+      "reason" : "the task with id [does_not_exist] doesn't exist",
       "stack_trace": ...
     }
   ],
   "type" : "resource_not_found_exception",
-  "reason" : "the task with id does_not_exist doesn't exist",
+  "reason" : "the task with id [does_not_exist] doesn't exist",
   "stack_trace": ...
 },
 "status": 404
@@ -6,7 +6,7 @@ creation in {es}, you must configure

 [source,yaml]
 -----------------------------------------------------------
-action.auto_create_index: .security,.monitoring*,.watches,.triggered_watches,.watcher-history*,.ml*
+action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml*
 -----------------------------------------------------------

 [IMPORTANT]
@@ -3,7 +3,7 @@
 [[sql-operators]]
 === Comparison Operators

-Boolean operator for comparing one or two expressions.
+Boolean operator for comparing against one or multiple expressions.

 * Equality (`=`)
@@ -40,6 +40,13 @@ include-tagged::{sql-specs}/filter.sql-spec[whereBetween]
 include-tagged::{sql-specs}/filter.sql-spec[whereIsNotNullAndIsNull]
 --------------------------------------------------

+* `IN (<value1>, <value2>, ...)`
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/filter.sql-spec[whereWithInAndMultipleValues]
+--------------------------------------------------
+
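Editor's note: the `whereWithInAndMultipleValues` snippet is not expanded in this diff; an illustrative query of the same shape (table and column names assumed):

["source","sql"]
--------------------------------------------------
SELECT last_name FROM emp WHERE emp_no IN (10000, 10001, 10002);
--------------------------------------------------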
 [[sql-operators-logical]]
 === Logical Operators
@@ -19,7 +19,7 @@ CAST ( expression<1> AS data_type<2> )

 .Description

-Casts the result of the given expression to the target type.
+Casts the result of the given expression to the target <<sql-data-types, data type>>.
 If the cast is not possible (for example because the target type is too narrow or because
 the value itself cannot be converted), the query fails.
@@ -37,3 +37,32 @@ include-tagged::{sql-specs}/docs.csv-spec[conversionIntToStringCast]
 ----
 include-tagged::{sql-specs}/docs.csv-spec[conversionStringToDateCast]
 ----
+
+
+[[sql-functions-type-conversion-convert]]
+==== `CONVERT`
+
+.Synopsis
+[source, sql]
+----
+CONVERT ( expression<1>, data_type<2> )
+----
+
+<1> Expression to convert
+<2> Target data type to convert to
+
+.Description
+
+Works exactly like <<sql-functions-type-conversion-cast>> with slightly different syntax.
+Moreover, apart from the standard <<sql-data-types, data types>> it supports the corresponding
+https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/explicit-data-type-conversion-function?view=sql-server-2017[ODBC data types].
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[conversionStringToIntConvertODBCDataType]
+----
+
+["source","sql",subs="attributes,callouts,macros"]
+----
+include-tagged::{sql-specs}/docs.csv-spec[conversionStringToIntConvertESDataType]
+----
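Editor's note: the two tagged snippets above are not expanded in this diff either. Sketches of what they demonstrate, one with an ODBC type name and one with an Elasticsearch type name (the literals and aliases are assumptions for illustration):

["source","sql"]
--------------------------------------------------
SELECT CONVERT('123', SQL_INTEGER) AS odbc_style;

SELECT CONVERT('123', INTEGER) AS es_style;
--------------------------------------------------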
@@ -34,6 +34,6 @@ indices:

 ["source","yaml",subs="attributes,callouts,macros"]
 --------------------------------------------------
-include-tagged::{sql-tests}/security/roles.yml[cli_jdbc]
+include-tagged::{sql-tests}/security/roles.yml[cli_drivers]
 --------------------------------------------------
@@ -48,8 +48,7 @@ if (!isEclipse && !isIdea) {

   forbiddenApisJava9 {
     if (project.runtimeJavaVersion < JavaVersion.VERSION_1_9) {
-      targetCompatibility = JavaVersion.VERSION_1_9
-      javaHome = project.java9Home
+      targetCompatibility = JavaVersion.VERSION_1_9.getMajorVersion()
     }
     replaceSignatureFiles 'jdk-signatures'
   }
@@ -44,7 +44,7 @@ teardown:
          "processors" : [
            {
              "pipeline" : {
-               "pipeline": "inner"
+               "name": "inner"
             }
           }
         ]
@@ -78,7 +78,7 @@ teardown:
          "processors" : [
            {
              "pipeline" : {
-               "pipeline": "inner"
+               "name": "inner"
             }
           }
         ]
@@ -94,7 +94,7 @@ teardown:
          "processors" : [
            {
              "pipeline" : {
-               "pipeline": "outer"
+               "name": "outer"
             }
           }
         ]
@@ -617,7 +617,7 @@ teardown:
          "processors" : [
            {
              "pipeline" : {
-               "pipeline": "inner"
+               "name": "inner"
             }
           }
         ]
@@ -633,7 +633,7 @@ teardown:
          "processors" : [
            {
              "pipeline" : {
-               "pipeline": "outer"
+               "name": "outer"
             }
           }
         ]
@@ -641,7 +641,6 @@ teardown:
  - match: { acknowledged: true }

  - do:
-      catch: /illegal_state_exception/
      ingest.simulate:
        verbose: true
        body: >
@@ -650,7 +649,7 @@ teardown:
          "processors" : [
            {
              "pipeline" : {
-               "pipeline": "outer"
+               "name": "outer"
             }
           }
         ]
@@ -667,8 +666,10 @@ teardown:
            }
          ]
        }
-  - match: { error.root_cause.0.type: "illegal_state_exception" }
-  - match: { error.root_cause.0.reason: "Cycle detected for pipeline: inner" }
+  - length: { docs: 1 }
+  - length: { docs.0.processor_results: 1 }
+  - match: { docs.0.processor_results.0.error.reason: "java.lang.IllegalArgumentException: java.lang.IllegalStateException: Cycle detected for pipeline: outer" }
+  - match: { docs.0.processor_results.0.error.caused_by.caused_by.reason: "Cycle detected for pipeline: outer" }

 ---
 "Test verbose simulate with Pipeline Processor with Multiple Pipelines":
@@ -686,7 +687,7 @@ teardown:
        },
        {
          "pipeline": {
-           "pipeline": "pipeline2"
+           "name": "pipeline2"
          }
        }
      ]
@@ -724,7 +725,7 @@ teardown:
        },
        {
          "pipeline": {
-           "pipeline": "pipeline1"
+           "name": "pipeline1"
          }
        }
      ]
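Editor's note: every hunk above makes the same mechanical change — the pipeline processor's target pipeline is now given by a `name` field instead of a `pipeline` field. A minimal pipeline body in the new form (pipeline id `inner` taken from the tests above):

[source,js]
--------------------------------------------------
{
  "processors": [
    {
      "pipeline": {
        "name": "inner"
      }
    }
  ]
}
--------------------------------------------------
// NOTCONSOLE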
@@ -1,4 +1,4 @@
-import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
+import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis

 /*
  * Licensed to Elasticsearch under one or more contributor
@@ -25,7 +25,7 @@ esplugin {
   hasClientJar = true
 }

-tasks.withType(ForbiddenApisCliTask) {
+tasks.withType(CheckForbiddenApis) {
   signatures += [
     "com.ibm.icu.text.Collator#getInstance() @ Don't use default locale, use getInstance(ULocale) instead"
   ]
@@ -61,8 +61,7 @@ if (!isEclipse && !isIdea) {

   forbiddenApisJava9 {
     if (project.runtimeJavaVersion < JavaVersion.VERSION_1_9) {
-      targetCompatibility = JavaVersion.VERSION_1_9
-      javaHome = project.java9Home
+      targetCompatibility = JavaVersion.VERSION_1_9.getMajorVersion()
     }
   }
@@ -51,8 +51,6 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona
  */
 public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject {
     private static final String TASKS = "tasks";
-    private static final String TASK_FAILURES = "task_failures";
-    private static final String NODE_FAILURES = "node_failures";

     private List<TaskInfo> tasks;
@@ -246,28 +244,6 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb
         return builder;
     }

-    private void toXContentCommon(XContentBuilder builder, Params params) throws IOException {
-        if (getTaskFailures() != null && getTaskFailures().size() > 0) {
-            builder.startArray(TASK_FAILURES);
-            for (TaskOperationFailure ex : getTaskFailures()){
-                builder.startObject();
-                builder.value(ex);
-                builder.endObject();
-            }
-            builder.endArray();
-        }
-
-        if (getNodeFailures() != null && getNodeFailures().size() > 0) {
-            builder.startArray(NODE_FAILURES);
-            for (ElasticsearchException ex : getNodeFailures()) {
-                builder.startObject();
-                ex.toXContent(builder, params);
-                builder.endObject();
-            }
-            builder.endArray();
-        }
-    }
-
     public static ListTasksResponse fromXContent(XContentParser parser) {
         return PARSER.apply(parser, null);
     }
@@ -21,17 +21,13 @@ package org.elasticsearch.action.ingest;

 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRunnable;
+import org.elasticsearch.ingest.CompoundProcessor;
 import org.elasticsearch.ingest.IngestDocument;
 import org.elasticsearch.ingest.Pipeline;
-import org.elasticsearch.ingest.CompoundProcessor;
-import org.elasticsearch.ingest.PipelineProcessor;
 import org.elasticsearch.threadpool.ThreadPool;

 import java.util.ArrayList;
-import java.util.Collections;
-import java.util.IdentityHashMap;
 import java.util.List;
-import java.util.Set;

 import static org.elasticsearch.ingest.TrackingResultProcessor.decorate;
@@ -46,11 +42,9 @@ class SimulateExecutionService {
     }

     SimulateDocumentResult executeDocument(Pipeline pipeline, IngestDocument ingestDocument, boolean verbose) {
-        // Prevent cycles in pipeline decoration
-        final Set<PipelineProcessor> pipelinesSeen = Collections.newSetFromMap(new IdentityHashMap<>());
         if (verbose) {
             List<SimulateProcessorResult> processorResultList = new ArrayList<>();
-            CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), processorResultList, pipelinesSeen);
+            CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), processorResultList);
             try {
                 Pipeline verbosePipeline = new Pipeline(pipeline.getId(), pipeline.getDescription(), pipeline.getVersion(),
                     verbosePipelineProcessor);
@@ -25,12 +25,15 @@ import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.TaskOperationFailure;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.tasks.TaskId;

 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Objects;
 import java.util.stream.Stream;

 import static java.util.stream.Collectors.toList;
@@ -41,6 +44,9 @@ import static org.elasticsearch.ExceptionsHelper.rethrowAndSuppress;
  * Base class for responses of task-related operations
  */
 public class BaseTasksResponse extends ActionResponse {
+    protected static final String TASK_FAILURES = "task_failures";
+    protected static final String NODE_FAILURES = "node_failures";
+
     private List<TaskOperationFailure> taskFailures;
     private List<ElasticsearchException> nodeFailures;

@@ -103,4 +109,44 @@ public class BaseTasksResponse extends ActionResponse {
             exp.writeTo(out);
         }
     }
+
+    protected void toXContentCommon(XContentBuilder builder, ToXContent.Params params) throws IOException {
+        if (getTaskFailures() != null && getTaskFailures().size() > 0) {
+            builder.startArray(TASK_FAILURES);
+            for (TaskOperationFailure ex : getTaskFailures()){
+                builder.startObject();
+                builder.value(ex);
+                builder.endObject();
+            }
+            builder.endArray();
+        }
+
+        if (getNodeFailures() != null && getNodeFailures().size() > 0) {
+            builder.startArray(NODE_FAILURES);
+            for (ElasticsearchException ex : getNodeFailures()) {
+                builder.startObject();
+                ex.toXContent(builder, params);
+                builder.endObject();
+            }
+            builder.endArray();
+        }
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        BaseTasksResponse response = (BaseTasksResponse) o;
+        return taskFailures.equals(response.taskFailures)
+            && nodeFailures.equals(response.nodeFailures);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(taskFailures, nodeFailures);
+    }
 }
@@ -27,27 +27,27 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.joda.FormatDateTimeFormatter;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.time.DateFormatter;
+import org.elasticsearch.common.time.DateFormatters;
 import org.elasticsearch.common.time.DateMathParser;
-import org.elasticsearch.common.time.DateUtils;
+import org.elasticsearch.common.time.JavaDateMathParser;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.indices.IndexClosedException;
 import org.elasticsearch.indices.InvalidIndexNameException;
-import org.joda.time.DateTimeZone;
-import org.joda.time.format.DateTimeFormat;
-import org.joda.time.format.DateTimeFormatter;

+import java.time.Instant;
+import java.time.ZoneId;
+import java.time.ZoneOffset;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 import java.util.SortedMap;
@@ -62,7 +62,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
     public IndexNameExpressionResolver(Settings settings) {
         super(settings);
         expressionResolvers = Arrays.asList(
-            dateMathExpressionResolver = new DateMathExpressionResolver(settings),
+            dateMathExpressionResolver = new DateMathExpressionResolver(),
             new WildcardExpressionResolver()
         );
     }
@@ -815,6 +815,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {

     static final class DateMathExpressionResolver implements ExpressionResolver {

+        private static final DateFormatter DEFAULT_DATE_FORMATTER = DateFormatters.forPattern("uuuu.MM.dd");
         private static final String EXPRESSION_LEFT_BOUND = "<";
         private static final String EXPRESSION_RIGHT_BOUND = ">";
         private static final char LEFT_BOUND = '{';
@@ -822,17 +823,6 @@ public class IndexNameExpressionResolver extends AbstractComponent {
         private static final char ESCAPE_CHAR = '\\';
         private static final char TIME_ZONE_BOUND = '|';

-        private final DateTimeZone defaultTimeZone;
-        private final String defaultDateFormatterPattern;
-        private final DateTimeFormatter defaultDateFormatter;
-
-        DateMathExpressionResolver(Settings settings) {
-            String defaultTimeZoneId = settings.get("date_math_expression_resolver.default_time_zone", "UTC");
-            this.defaultTimeZone = DateTimeZone.forID(defaultTimeZoneId);
-            defaultDateFormatterPattern = settings.get("date_math_expression_resolver.default_date_format", "YYYY.MM.dd");
-            this.defaultDateFormatter = DateTimeFormat.forPattern(defaultDateFormatterPattern);
-        }
-
         @Override
         public List<String> resolve(final Context context, List<String> expressions) {
             List<String> result = new ArrayList<>(expressions.size());
@@ -896,13 +886,12 @@ public class IndexNameExpressionResolver extends AbstractComponent {
                 int dateTimeFormatLeftBoundIndex = inPlaceHolderString.indexOf(LEFT_BOUND);
                 String mathExpression;
                 String dateFormatterPattern;
-                DateTimeFormatter dateFormatter;
-                final DateTimeZone timeZone;
+                DateFormatter dateFormatter;
+                final ZoneId timeZone;
                 if (dateTimeFormatLeftBoundIndex < 0) {
                     mathExpression = inPlaceHolderString;
-                    dateFormatterPattern = defaultDateFormatterPattern;
-                    dateFormatter = defaultDateFormatter;
-                    timeZone = defaultTimeZone;
+                    dateFormatter = DEFAULT_DATE_FORMATTER;
+                    timeZone = ZoneOffset.UTC;
                 } else {
                     if (inPlaceHolderString.lastIndexOf(RIGHT_BOUND) != inPlaceHolderString.length() - 1) {
                         throw new ElasticsearchParseException("invalid dynamic name expression [{}]. missing closing `}` for date math format", inPlaceHolderString);
@@ -915,20 +904,18 @@ public class IndexNameExpressionResolver extends AbstractComponent {
                     int formatPatternTimeZoneSeparatorIndex = dateFormatterPatternAndTimeZoneId.indexOf(TIME_ZONE_BOUND);
                     if (formatPatternTimeZoneSeparatorIndex != -1) {
                         dateFormatterPattern = dateFormatterPatternAndTimeZoneId.substring(0, formatPatternTimeZoneSeparatorIndex);
-                        timeZone = DateTimeZone.forID(dateFormatterPatternAndTimeZoneId.substring(formatPatternTimeZoneSeparatorIndex + 1));
+                        timeZone = ZoneId.of(dateFormatterPatternAndTimeZoneId.substring(formatPatternTimeZoneSeparatorIndex + 1));
                     } else {
                         dateFormatterPattern = dateFormatterPatternAndTimeZoneId;
-                        timeZone = defaultTimeZone;
+                        timeZone = ZoneOffset.UTC;
                     }
-                    dateFormatter = DateTimeFormat.forPattern(dateFormatterPattern);
+                    dateFormatter = DateFormatters.forPattern(dateFormatterPattern);
                 }
-                DateTimeFormatter parser = dateFormatter.withZone(timeZone);
-                FormatDateTimeFormatter formatter = new FormatDateTimeFormatter(dateFormatterPattern, parser, Locale.ROOT);
-                DateMathParser dateMathParser = formatter.toDateMathParser();
-                long millis = dateMathParser.parse(mathExpression, context::getStartTime, false,
-                    DateUtils.dateTimeZoneToZoneId(timeZone));
+                DateFormatter formatter = dateFormatter.withZone(timeZone);
+                DateMathParser dateMathParser = new JavaDateMathParser(formatter);
+                long millis = dateMathParser.parse(mathExpression, context::getStartTime, false, timeZone);

-                String time = formatter.printer().print(millis);
+                String time = formatter.format(Instant.ofEpochMilli(millis));
                 beforePlaceHolderSb.append(time);
                 inPlaceHolderSb = new StringBuilder();
                 inPlaceHolder = false;
@@ -968,18 +955,4 @@ public class IndexNameExpressionResolver extends AbstractComponent {
             return beforePlaceHolderSb.toString();
         }
     }
-
-    /**
-     * Returns <code>true</code> iff the given expression resolves to the given index name otherwise <code>false</code>
-     */
-    public final boolean matchesIndex(String indexName, String expression, ClusterState state) {
-        final String[] concreteIndices = concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), expression);
-        for (String index : concreteIndices) {
-            if (Regex.simpleMatch(index, indexName)) {
-                return true;
-            }
-        }
-        return indexName.equals(expression);
-    }
-
 }
@@ -249,53 +249,45 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
     }

     /**
-     * Finds the specific index aliases that point to the specified concrete indices or match partially with the indices via wildcards.
+     * Finds the specific index aliases that point to the requested concrete indices directly
+     * or that match with the indices via wildcards.
      *
-     * @param concreteIndices The concrete indexes the index aliases must point to order to be returned.
-     * @return a map of index to a list of alias metadata, the list corresponding to a concrete index will be empty if no aliases are
-     * present for that index
+     * @param concreteIndices The concrete indices that the aliases must point to in order to be returned.
+     * @return A map of index name to the list of aliases metadata. If a concrete index does not have matching
+     *         aliases then the result will <b>not</b> include the index's key.
      */
-    public ImmutableOpenMap<String, List<AliasMetaData>> findAllAliases(String[] concreteIndices) {
-        return findAliases(Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY, concreteIndices);
+    public ImmutableOpenMap<String, List<AliasMetaData>> findAllAliases(final String[] concreteIndices) {
+        return findAliases(Strings.EMPTY_ARRAY, concreteIndices);
     }

     /**
-     * Finds the specific index aliases that match with the specified aliases directly or partially via wildcards and
-     * that point to the specified concrete indices or match partially with the indices via wildcards.
+     * Finds the specific index aliases that match with the specified aliases directly or partially via wildcards, and
+     * that point to the specified concrete indices (directly or matching indices via wildcards).
      *
      * @param aliasesRequest The request to find aliases for
-     * @param concreteIndices The concrete indexes the index aliases must point to order to be returned.
-     * @return a map of index to a list of alias metadata, the list corresponding to a concrete index will be empty if no aliases are
-     * present for that index
+     * @param concreteIndices The concrete indices that the aliases must point to in order to be returned.
+     * @return A map of index name to the list of aliases metadata. If a concrete index does not have matching
+     *         aliases then the result will <b>not</b> include the index's key.
      */
-    public ImmutableOpenMap<String, List<AliasMetaData>> findAliases(final AliasesRequest aliasesRequest, String[] concreteIndices) {
-        return findAliases(aliasesRequest.getOriginalAliases(), aliasesRequest.aliases(), concreteIndices);
+    public ImmutableOpenMap<String, List<AliasMetaData>> findAliases(final AliasesRequest aliasesRequest, final String[] concreteIndices) {
+        return findAliases(aliasesRequest.aliases(), concreteIndices);
     }

     /**
-     * Finds the specific index aliases that match with the specified aliases directly or partially via wildcards and
-     * that point to the specified concrete indices or match partially with the indices via wildcards.
+     * Finds the specific index aliases that match with the specified aliases directly or partially via wildcards, and
+     * that point to the specified concrete indices (directly or matching indices via wildcards).
      *
-     * @param aliases The aliases to look for
-     * @param originalAliases The original aliases that the user originally requested
-     * @param concreteIndices The concrete indexes the index aliases must point to order to be returned.
-     * @return a map of index to a list of alias metadata, the list corresponding to a concrete index will be empty if no aliases are
-     * present for that index
+     * @param aliases The aliases to look for. Might contain include or exclude wildcards.
+     * @param concreteIndices The concrete indices that the aliases must point to in order to be returned
+     * @return A map of index name to the list of aliases metadata. If a concrete index does not have matching
+     *         aliases then the result will <b>not</b> include the index's key.
      */
-    private ImmutableOpenMap<String, List<AliasMetaData>> findAliases(String[] originalAliases, String[] aliases,
-                                                                      String[] concreteIndices) {
+    private ImmutableOpenMap<String, List<AliasMetaData>> findAliases(final String[] aliases, final String[] concreteIndices) {
         assert aliases != null;
-        assert originalAliases != null;
         assert concreteIndices != null;
         if (concreteIndices.length == 0) {
             return ImmutableOpenMap.of();
         }

-        //if aliases were provided but they got replaced with empty aliases, return empty map
-        if (originalAliases.length > 0 && aliases.length == 0) {
-            return ImmutableOpenMap.of();
-        }
-
         String[] patterns = new String[aliases.length];
         boolean[] include = new boolean[aliases.length];
         for (int i = 0; i < aliases.length; i++) {
@@ -331,7 +323,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
                     filteredValues.add(value);
                 }
             }
-
             if (filteredValues.isEmpty() == false) {
                 // Make the list order deterministic
                 CollectionUtil.timSort(filteredValues, Comparator.comparing(AliasMetaData::alias));
@@ -20,12 +20,15 @@
 package org.elasticsearch.ingest;

 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.collect.Tuple;

 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.function.LongSupplier;
 import java.util.stream.Collectors;

 /**
@@ -40,16 +43,33 @@ public class CompoundProcessor implements Processor {
     private final boolean ignoreFailure;
     private final List<Processor> processors;
     private final List<Processor> onFailureProcessors;
+    private final List<Tuple<Processor, IngestMetric>> processorsWithMetrics;
+    private final LongSupplier relativeTimeProvider;
+
+    CompoundProcessor(LongSupplier relativeTimeProvider, Processor... processor) {
+        this(false, Arrays.asList(processor), Collections.emptyList(), relativeTimeProvider);
+    }

     public CompoundProcessor(Processor... processor) {
         this(false, Arrays.asList(processor), Collections.emptyList());
     }

     public CompoundProcessor(boolean ignoreFailure, List<Processor> processors, List<Processor> onFailureProcessors) {
+        this(ignoreFailure, processors, onFailureProcessors, System::nanoTime);
+    }
+
+    CompoundProcessor(boolean ignoreFailure, List<Processor> processors, List<Processor> onFailureProcessors,
+                      LongSupplier relativeTimeProvider) {
         super();
         this.ignoreFailure = ignoreFailure;
         this.processors = processors;
         this.onFailureProcessors = onFailureProcessors;
+        this.relativeTimeProvider = relativeTimeProvider;
+        this.processorsWithMetrics = new ArrayList<>(processors.size());
+        processors.forEach(p -> processorsWithMetrics.add(new Tuple<>(p, new IngestMetric())));
+    }
+
+    List<Tuple<Processor, IngestMetric>> getProcessorsWithMetrics() {
+        return processorsWithMetrics;
     }

     public boolean isIgnoreFailure() {
@@ -94,12 +114,17 @@ public class CompoundProcessor implements Processor {

     @Override
     public IngestDocument execute(IngestDocument ingestDocument) throws Exception {
-        for (Processor processor : processors) {
+        for (Tuple<Processor, IngestMetric> processorWithMetric : processorsWithMetrics) {
+            Processor processor = processorWithMetric.v1();
+            IngestMetric metric = processorWithMetric.v2();
+            long startTimeInNanos = relativeTimeProvider.getAsLong();
             try {
+                metric.preIngest();
                 if (processor.execute(ingestDocument) == null) {
                     return null;
                 }
             } catch (Exception e) {
+                metric.ingestFailed();
                 if (ignoreFailure) {
                     continue;
                 }
@@ -112,11 +137,15 @@ public class CompoundProcessor implements Processor {
                     executeOnFailure(ingestDocument, compoundProcessorException);
                     break;
                 }
+            } finally {
+                long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeInNanos);
+                metric.postIngest(ingestTimeInMillis);
             }
         }
         return ingestDocument;
     }

     void executeOnFailure(IngestDocument ingestDocument, ElasticsearchException exception) throws Exception {
         try {
             putFailureMetadata(ingestDocument, exception);
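Editor's note: the new package-private constructors that take a `LongSupplier` exist so tests can swap `System::nanoTime` for a deterministic clock. A minimal sketch of such a clock (not part of this commit; names are hypothetical):

```java
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;

// Hypothetical test helper: a relative clock that advances exactly one
// millisecond per call, making the ingest-time metrics deterministic.
class FakeRelativeClock implements LongSupplier {
    private final AtomicLong nanos = new AtomicLong();

    @Override
    public long getAsLong() {
        return nanos.addAndGet(TimeUnit.MILLISECONDS.toNanos(1));
    }
}
```

A test could then construct `new CompoundProcessor(false, processors, onFailureProcessors, new FakeRelativeClock())` and assert exact ingest times instead of sleeping.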
@@ -28,6 +28,8 @@ import java.util.List;
 import java.util.ListIterator;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.LongSupplier;
 import java.util.stream.Collectors;
 import org.elasticsearch.script.IngestConditionalScript;
 import org.elasticsearch.script.Script;
@@ -42,24 +44,54 @@ public class ConditionalProcessor extends AbstractProcessor {
     private final ScriptService scriptService;

     private final Processor processor;
+    private final IngestMetric metric;
+    private final LongSupplier relativeTimeProvider;

     ConditionalProcessor(String tag, Script script, ScriptService scriptService, Processor processor) {
+        this(tag, script, scriptService, processor, System::nanoTime);
+    }
+
+    ConditionalProcessor(String tag, Script script, ScriptService scriptService, Processor processor, LongSupplier relativeTimeProvider) {
         super(tag);
         this.condition = script;
         this.scriptService = scriptService;
         this.processor = processor;
+        this.metric = new IngestMetric();
+        this.relativeTimeProvider = relativeTimeProvider;
     }

     @Override
     public IngestDocument execute(IngestDocument ingestDocument) throws Exception {
-        IngestConditionalScript script =
-            scriptService.compile(condition, IngestConditionalScript.CONTEXT).newInstance(condition.getParams());
-        if (script.execute(new UnmodifiableIngestData(ingestDocument.getSourceAndMetadata()))) {
-            return processor.execute(ingestDocument);
+        if (evaluate(ingestDocument)) {
+            long startTimeInNanos = relativeTimeProvider.getAsLong();
+            try {
+                metric.preIngest();
+                return processor.execute(ingestDocument);
+            } catch (Exception e) {
+                metric.ingestFailed();
+                throw e;
+            } finally {
+                long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeInNanos);
+                metric.postIngest(ingestTimeInMillis);
+            }
         }
         return ingestDocument;
     }

+    boolean evaluate(IngestDocument ingestDocument) {
+        IngestConditionalScript script =
+            scriptService.compile(condition, IngestConditionalScript.CONTEXT).newInstance(condition.getParams());
+        return script.execute(new UnmodifiableIngestData(ingestDocument.getSourceAndMetadata()));
+    }
+
     Processor getProcessor() {
         return processor;
     }

+    IngestMetric getMetric() {
+        return metric;
+    }
+
     @Override
     public String getType() {
         return TYPE;
@@ -19,19 +19,6 @@
package org.elasticsearch.ingest;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceNotFoundException;

@@ -49,6 +36,7 @@ import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;

@@ -61,6 +49,19 @@ import org.elasticsearch.plugins.IngestPlugin;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

/**
 * Holder class for several ingest related services.
 */

@@ -262,11 +263,59 @@ public class IngestService implements ClusterStateApplier {
                Pipeline originalPipeline = originalPipelines.get(id);
                if (originalPipeline != null) {
                    pipeline.getMetrics().add(originalPipeline.getMetrics());
                    List<Tuple<Processor, IngestMetric>> oldPerProcessMetrics = new ArrayList<>();
                    List<Tuple<Processor, IngestMetric>> newPerProcessMetrics = new ArrayList<>();
                    getProcessorMetrics(originalPipeline.getCompoundProcessor(), oldPerProcessMetrics);
                    getProcessorMetrics(pipeline.getCompoundProcessor(), newPerProcessMetrics);
                    // Best attempt to populate the new processor metrics using a parallel array of the old metrics. This is not ideal
                    // since the per-processor metrics may get reset when the arrays don't match. However, to get to an ideal model,
                    // unique and consistent ids per processor and/or semantic equals for each processor will be needed.
                    if (newPerProcessMetrics.size() == oldPerProcessMetrics.size()) {
                        Iterator<Tuple<Processor, IngestMetric>> oldMetricsIterator = oldPerProcessMetrics.iterator();
                        for (Tuple<Processor, IngestMetric> compositeMetric : newPerProcessMetrics) {
                            String type = compositeMetric.v1().getType();
                            IngestMetric metric = compositeMetric.v2();
                            if (oldMetricsIterator.hasNext()) {
                                Tuple<Processor, IngestMetric> oldCompositeMetric = oldMetricsIterator.next();
                                String oldType = oldCompositeMetric.v1().getType();
                                IngestMetric oldMetric = oldCompositeMetric.v2();
                                if (type.equals(oldType)) {
                                    metric.add(oldMetric);
                                }
                            }
                        }
                    }
                }
            });
        }
    }

    /**
     * Recursive method to obtain all of the non-failure processors for a given compoundProcessor. Since conditionals are implemented
     * as wrappers around the actual processor, always prefer the actual processor's metric over the conditional processor's metric.
     * @param compoundProcessor The compound processor to start walking the non-failure processors
     * @param processorMetrics The list of {@link Processor} {@link IngestMetric} tuples.
     * @return the processorMetrics for all non-failure processors that belong to the original compoundProcessor
     */
    private static List<Tuple<Processor, IngestMetric>> getProcessorMetrics(CompoundProcessor compoundProcessor,
                                                                            List<Tuple<Processor, IngestMetric>> processorMetrics) {
        // only surface the top level non-failure processors, on-failure processor times will be included in the top level non-failure
        for (Tuple<Processor, IngestMetric> processorWithMetric : compoundProcessor.getProcessorsWithMetrics()) {
            Processor processor = processorWithMetric.v1();
            IngestMetric metric = processorWithMetric.v2();
            if (processor instanceof CompoundProcessor) {
                getProcessorMetrics((CompoundProcessor) processor, processorMetrics);
            } else {
                // Prefer the conditional's metric since it only includes metrics when the conditional evaluated to true.
                if (processor instanceof ConditionalProcessor) {
                    metric = ((ConditionalProcessor) processor).getMetric();
                }
                processorMetrics.add(new Tuple<>(processor, metric));
            }
        }
        return processorMetrics;
    }

    private static Pipeline substitutePipeline(String id, ElasticsearchParseException e) {
        String tag = e.getHeaderKeys().contains("processor_tag") ? e.getHeader("processor_tag").get(0) : null;
        String type = e.getHeaderKeys().contains("processor_type") ? e.getHeader("processor_type").get(0) : "unknown";

@@ -371,11 +420,42 @@ public class IngestService implements ClusterStateApplier {
    }

    public IngestStats stats() {
        IngestStats.Builder statsBuilder = new IngestStats.Builder();
        statsBuilder.addTotalMetrics(totalMetrics);
        pipelines.forEach((id, pipeline) -> {
            CompoundProcessor rootProcessor = pipeline.getCompoundProcessor();
            statsBuilder.addPipelineMetrics(id, pipeline.getMetrics());
            List<Tuple<Processor, IngestMetric>> processorMetrics = new ArrayList<>();
            getProcessorMetrics(rootProcessor, processorMetrics);
            processorMetrics.forEach(t -> {
                Processor processor = t.v1();
                IngestMetric processorMetric = t.v2();
                statsBuilder.addProcessorMetrics(id, getProcessorName(processor), processorMetric);
            });
        });
        return statsBuilder.build();
    }

        Map<String, IngestStats.Stats> statsPerPipeline =
            pipelines.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, v -> v.getValue().getMetrics().createStats()));
    // package private for testing
    static String getProcessorName(Processor processor) {
        // conditionals are implemented as wrappers around the real processor, so get the real processor for the correct type for the name
        if (processor instanceof ConditionalProcessor) {
            processor = ((ConditionalProcessor) processor).getProcessor();
        }
        StringBuilder sb = new StringBuilder(5);
        sb.append(processor.getType());

        return new IngestStats(totalMetrics.createStats(), statsPerPipeline);
        if (processor instanceof PipelineProcessor) {
            String pipelineName = ((PipelineProcessor) processor).getPipelineName();
            sb.append(":");
            sb.append(pipelineName);
        }
        String tag = processor.getTag();
        if (tag != null && !tag.isEmpty()) {
            sb.append(":");
            sb.append(tag);
        }
        return sb.toString();
    }

    private void innerExecute(IndexRequest indexRequest, Pipeline pipeline, Consumer<IndexRequest> itemDroppedHandler) throws Exception {
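The naming scheme above concatenates the processor type, an optional target pipeline name, and an optional tag. A standalone restatement, runnable on its own; the helper and the example values are illustrative, not the actual method:

// Restatement of the stats-name scheme built by getProcessorName above.
final class ProcessorNameSketch {
    static String statsName(String type, String pipelineName, String tag) {
        StringBuilder sb = new StringBuilder(type);
        if (pipelineName != null) {
            sb.append(':').append(pipelineName); // pipeline processors append the pipeline they invoke
        }
        if (tag != null && !tag.isEmpty()) {
            sb.append(':').append(tag);          // tagged processors append their tag
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(statsName("set", null, null));               // set
        System.out.println(statsName("pipeline", "logs-enrich", null)); // pipeline:logs-enrich
        System.out.println(statsName("set", null, "rename-host"));      // set:rename-host
    }
}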
@@ -19,6 +19,7 @@
package org.elasticsearch.ingest;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

@@ -27,17 +28,28 @@ import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

public class IngestStats implements Writeable, ToXContentFragment {
    private final Stats totalStats;
    private final Map<String, Stats> statsPerPipeline;
    private final List<PipelineStat> pipelineStats;
    private final Map<String, List<ProcessorStat>> processorStats;

    public IngestStats(Stats totalStats, Map<String, Stats> statsPerPipeline) {
    /**
     * @param totalStats - The total stats for Ingest. This is logically the sum of all pipeline stats,
     *                     and pipeline stats are logically the sum of the processor stats.
     * @param pipelineStats - The stats for a given ingest pipeline.
     * @param processorStats - The per-processor stats for a given pipeline. A map keyed by the pipeline identifier.
     */
    public IngestStats(Stats totalStats, List<PipelineStat> pipelineStats, Map<String, List<ProcessorStat>> processorStats) {
        this.totalStats = totalStats;
        this.statsPerPipeline = statsPerPipeline;
        this.pipelineStats = pipelineStats;
        this.processorStats = processorStats;
    }

    /**

@@ -46,35 +58,45 @@ public class IngestStats implements Writeable, ToXContentFragment {
    public IngestStats(StreamInput in) throws IOException {
        this.totalStats = new Stats(in);
        int size = in.readVInt();
        this.statsPerPipeline = new HashMap<>(size);
        this.pipelineStats = new ArrayList<>(size);
        this.processorStats = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            statsPerPipeline.put(in.readString(), new Stats(in));
            String pipelineId = in.readString();
            Stats pipelineStat = new Stats(in);
            this.pipelineStats.add(new PipelineStat(pipelineId, pipelineStat));
            if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
                int processorsSize = in.readVInt();
                List<ProcessorStat> processorStatsPerPipeline = new ArrayList<>(processorsSize);
                for (int j = 0; j < processorsSize; j++) {
                    String processorName = in.readString();
                    Stats processorStat = new Stats(in);
                    processorStatsPerPipeline.add(new ProcessorStat(processorName, processorStat));
                }
                this.processorStats.put(pipelineId, processorStatsPerPipeline);
            }
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        totalStats.writeTo(out);
        out.writeVInt(statsPerPipeline.size());
        for (Map.Entry<String, Stats> entry : statsPerPipeline.entrySet()) {
            out.writeString(entry.getKey());
            entry.getValue().writeTo(out);
        out.writeVInt(pipelineStats.size());
        for (PipelineStat pipelineStat : pipelineStats) {
            out.writeString(pipelineStat.getPipelineId());
            pipelineStat.getStats().writeTo(out);
            if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
                List<ProcessorStat> processorStatsForPipeline = processorStats.get(pipelineStat.getPipelineId());
                if (processorStatsForPipeline == null) {
                    out.writeVInt(0);
                } else {
                    out.writeVInt(processorStatsForPipeline.size());
                    for (ProcessorStat processorStat : processorStatsForPipeline) {
                        out.writeString(processorStat.getName());
                        processorStat.getStats().writeTo(out);
                    }
                }
            }

    /**
     * @return The accumulated stats for all pipelines
     */
    public Stats getTotalStats() {
        return totalStats;
    }

    /**
     * @return The stats on a per pipeline basis
     */
    public Map<String, Stats> getStatsPerPipeline() {
        return statsPerPipeline;
    }

    @Override

@@ -84,9 +106,21 @@ public class IngestStats implements Writeable, ToXContentFragment {
        totalStats.toXContent(builder, params);
        builder.endObject();
        builder.startObject("pipelines");
        for (Map.Entry<String, Stats> entry : statsPerPipeline.entrySet()) {
            builder.startObject(entry.getKey());
            entry.getValue().toXContent(builder, params);
        for (PipelineStat pipelineStat : pipelineStats) {
            builder.startObject(pipelineStat.getPipelineId());
            pipelineStat.getStats().toXContent(builder, params);
            List<ProcessorStat> processorStatsForPipeline = processorStats.get(pipelineStat.getPipelineId());
            builder.startArray("processors");
            if (processorStatsForPipeline != null) {
                for (ProcessorStat processorStat : processorStatsForPipeline) {
                    builder.startObject();
                    builder.startObject(processorStat.getName());
                    processorStat.getStats().toXContent(builder, params);
                    builder.endObject();
                    builder.endObject();
                }
            }
            builder.endArray();
            builder.endObject();
        }
        builder.endObject();

@@ -94,6 +128,18 @@ public class IngestStats implements Writeable, ToXContentFragment {
        return builder;
    }

    public Stats getTotalStats() {
        return totalStats;
    }

    public List<PipelineStat> getPipelineStats() {
        return pipelineStats;
    }

    public Map<String, List<ProcessorStat>> getProcessorStats() {
        return processorStats;
    }

    public static class Stats implements Writeable, ToXContentFragment {

        private final long ingestCount;

@@ -134,7 +180,6 @@ public class IngestStats implements Writeable, ToXContentFragment {
    }

    /**
     *
     * @return The total time spent on ingest preprocessing in millis.
     */
    public long getIngestTimeInMillis() {

@@ -164,4 +209,77 @@ public class IngestStats implements Writeable, ToXContentFragment {
            return builder;
        }
    }

    /**
     * Easy conversion from scoped {@link IngestMetric} objects to serializable Stats objects
     */
    static class Builder {
        private Stats totalStats;
        private List<PipelineStat> pipelineStats = new ArrayList<>();
        private Map<String, List<ProcessorStat>> processorStats = new HashMap<>();

        Builder addTotalMetrics(IngestMetric totalMetric) {
            this.totalStats = totalMetric.createStats();
            return this;
        }

        Builder addPipelineMetrics(String pipelineId, IngestMetric pipelineMetric) {
            this.pipelineStats.add(new PipelineStat(pipelineId, pipelineMetric.createStats()));
            return this;
        }

        Builder addProcessorMetrics(String pipelineId, String processorName, IngestMetric metric) {
            this.processorStats.computeIfAbsent(pipelineId, k -> new ArrayList<>())
                .add(new ProcessorStat(processorName, metric.createStats()));
            return this;
        }

        IngestStats build() {
            return new IngestStats(totalStats, Collections.unmodifiableList(pipelineStats),
                Collections.unmodifiableMap(processorStats));
        }
    }

    /**
     * Container for pipeline stats.
     */
    public static class PipelineStat {
        private final String pipelineId;
        private final Stats stats;

        public PipelineStat(String pipelineId, Stats stats) {
            this.pipelineId = pipelineId;
            this.stats = stats;
        }

        public String getPipelineId() {
            return pipelineId;
        }

        public Stats getStats() {
            return stats;
        }
    }

    /**
     * Container for processor stats.
     */
    public static class ProcessorStat {
        private final String name;
        private final Stats stats;

        public ProcessorStat(String name, Stats stats) {
            this.name = name;
            this.stats = stats;
        }

        public String getName() {
            return name;
        }

        public Stats getStats() {
            return stats;
        }
    }
}
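The Builder collapses live IngestMetric objects into the immutable, serializable stats snapshot at one call site. A hypothetical usage, from within the same package since the Builder is package private; the pipeline id and processor name are made up:

// Sketch: assemble an IngestStats snapshot from live metrics.
IngestMetric totalMetric = new IngestMetric();
IngestMetric pipelineMetric = new IngestMetric();
IngestMetric processorMetric = new IngestMetric();

IngestStats stats = new IngestStats.Builder()
    .addTotalMetrics(totalMetric)
    .addPipelineMetrics("logs-pipeline", pipelineMetric)
    .addProcessorMetrics("logs-pipeline", "set:rename-host", processorMetric)
    .build(); // returns unmodifiable pipeline and processor collections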
@@ -22,11 +22,12 @@ package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;

import java.time.Clock;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

import org.elasticsearch.script.ScriptService;

@@ -47,20 +48,21 @@ public final class Pipeline {
    private final Integer version;
    private final CompoundProcessor compoundProcessor;
    private final IngestMetric metrics;
    private final Clock clock;
    private final LongSupplier relativeTimeProvider;

    public Pipeline(String id, @Nullable String description, @Nullable Integer version, CompoundProcessor compoundProcessor) {
        this(id, description, version, compoundProcessor, Clock.systemUTC());
        this(id, description, version, compoundProcessor, System::nanoTime);
    }

    // package private for testing
    Pipeline(String id, @Nullable String description, @Nullable Integer version, CompoundProcessor compoundProcessor, Clock clock) {
    Pipeline(String id, @Nullable String description, @Nullable Integer version, CompoundProcessor compoundProcessor,
             LongSupplier relativeTimeProvider) {
        this.id = id;
        this.description = description;
        this.compoundProcessor = compoundProcessor;
        this.version = version;
        this.metrics = new IngestMetric();
        this.clock = clock;
        this.relativeTimeProvider = relativeTimeProvider;
    }

    public static Pipeline create(String id, Map<String, Object> config,

@@ -89,7 +91,7 @@ public final class Pipeline {
     * Modifies the data of a document to be indexed based on the processor this pipeline holds
     */
    public IngestDocument execute(IngestDocument ingestDocument) throws Exception {
        long startTimeInMillis = clock.millis();
        long startTimeInNanos = relativeTimeProvider.getAsLong();
        try {
            metrics.preIngest();
            return compoundProcessor.execute(ingestDocument);

@@ -97,7 +99,7 @@ public final class Pipeline {
            metrics.ingestFailed();
            throw e;
        } finally {
            long ingestTimeInMillis = clock.millis() - startTimeInMillis;
            long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeInNanos);
            metrics.postIngest(ingestTimeInMillis);
        }
    }
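The switch from Clock to a relative nano supplier matters because wall-clock time can jump (for example under NTP adjustment), while System.nanoTime is monotonic, so elapsed-time subtraction cannot go negative, and the supplier stays injectable for tests. A minimal sketch using only the JDK:

import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

// Monotonic elapsed-time measurement, the pattern adopted above.
public final class ElapsedSketch {
    public static void main(String[] args) throws InterruptedException {
        LongSupplier relativeTimeProvider = System::nanoTime; // a fake LongSupplier in tests
        long start = relativeTimeProvider.getAsLong();
        Thread.sleep(5); // hypothetical work
        long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - start);
        System.out.println(elapsedMillis);
    }
}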
@@ -53,6 +53,10 @@ public class PipelineProcessor extends AbstractProcessor {
        return TYPE;
    }

    String getPipelineName() {
        return pipelineName;
    }

    public static final class Factory implements Processor.Factory {

        private final IngestService ingestService;

@@ -65,7 +69,7 @@ public class PipelineProcessor extends AbstractProcessor {
        public PipelineProcessor create(Map<String, Processor.Factory> registry, String processorTag,
                                        Map<String, Object> config) throws Exception {
            String pipeline =
                ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "pipeline");
                ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "name");
            return new PipelineProcessor(processorTag, pipeline, ingestService);
        }
    }
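Note the configuration key for the target pipeline changes here from "pipeline" to "name". A hypothetical snippet exercising the factory above (the factory instance and pipeline id are made up for illustration):

// Sketch: building a pipeline processor from a config map with the new key.
Map<String, Object> config = new HashMap<>();
config.put("name", "logs-enrich"); // previously the key was "pipeline"
PipelineProcessor processor =
    factory.create(Collections.emptyMap(), "my-tag", config); // factory: a Factory built with the IngestService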
@@ -19,11 +19,11 @@
package org.elasticsearch.ingest;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ingest.SimulateProcessorResult;

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

/**
 * Processor to be used within Simulate API to keep track of processors executed in pipeline.

@@ -42,14 +42,46 @@ public final class TrackingResultProcessor implements Processor {

    @Override
    public IngestDocument execute(IngestDocument ingestDocument) throws Exception {
        Processor processor = actualProcessor;
        try {
            actualProcessor.execute(ingestDocument);
            processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument)));
            if (processor instanceof ConditionalProcessor) {
                ConditionalProcessor conditionalProcessor = (ConditionalProcessor) processor;
                if (conditionalProcessor.evaluate(ingestDocument) == false) {
                    return ingestDocument;
                }
                if (conditionalProcessor.getProcessor() instanceof PipelineProcessor) {
                    processor = conditionalProcessor.getProcessor();
                }
            }
            if (processor instanceof PipelineProcessor) {
                PipelineProcessor pipelineProcessor = ((PipelineProcessor) processor);
                Pipeline pipeline = pipelineProcessor.getPipeline();
                // runtime check for cycles against a copy of the document. This is needed to properly handle conditionals around pipelines
                try {
                    IngestDocument ingestDocumentCopy = new IngestDocument(ingestDocument);
                    ingestDocumentCopy.executePipeline(pipelineProcessor.getPipeline());
                } catch (ElasticsearchException elasticsearchException) {
                    if (elasticsearchException.getCause().getCause() instanceof IllegalStateException) {
                        throw elasticsearchException;
                    }
                    // else do nothing, let the tracking processors throw the exception while recording the path up to the failure
                } catch (Exception e) {
                    // do nothing, let the tracking processors throw the exception while recording the path up to the failure
                }
                // now that we know that there are no cycles between pipelines, decorate the processors for this pipeline and execute it
                CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), processorResultList);
                Pipeline verbosePipeline = new Pipeline(pipeline.getId(), pipeline.getDescription(), pipeline.getVersion(),
                    verbosePipelineProcessor);
                ingestDocument.executePipeline(verbosePipeline);
            } else {
                processor.execute(ingestDocument);
                processorResultList.add(new SimulateProcessorResult(processor.getTag(), new IngestDocument(ingestDocument)));
            }
        } catch (Exception e) {
            if (ignoreFailure) {
                processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument), e));
                processorResultList.add(new SimulateProcessorResult(processor.getTag(), new IngestDocument(ingestDocument), e));
            } else {
                processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), e));
                processorResultList.add(new SimulateProcessorResult(processor.getTag(), e));
            }
            throw e;
        }

@@ -66,35 +98,19 @@ public final class TrackingResultProcessor implements Processor {
        return actualProcessor.getTag();
    }

    public static CompoundProcessor decorate(CompoundProcessor compoundProcessor, List<SimulateProcessorResult> processorResultList,
                                             Set<PipelineProcessor> pipelinesSeen) {
    public static CompoundProcessor decorate(CompoundProcessor compoundProcessor, List<SimulateProcessorResult> processorResultList) {
        List<Processor> processors = new ArrayList<>(compoundProcessor.getProcessors().size());
        for (Processor processor : compoundProcessor.getProcessors()) {
            if (processor instanceof PipelineProcessor) {
                PipelineProcessor pipelineProcessor = ((PipelineProcessor) processor);
                if (pipelinesSeen.add(pipelineProcessor) == false) {
                    throw new IllegalStateException("Cycle detected for pipeline: " + pipelineProcessor.getPipeline().getId());
                }
                processors.add(decorate(pipelineProcessor.getPipeline().getCompoundProcessor(), processorResultList, pipelinesSeen));
                pipelinesSeen.remove(pipelineProcessor);
            } else if (processor instanceof CompoundProcessor) {
                processors.add(decorate((CompoundProcessor) processor, processorResultList, pipelinesSeen));
            if (processor instanceof CompoundProcessor) {
                processors.add(decorate((CompoundProcessor) processor, processorResultList));
            } else {
                processors.add(new TrackingResultProcessor(compoundProcessor.isIgnoreFailure(), processor, processorResultList));
            }
        }
        List<Processor> onFailureProcessors = new ArrayList<>(compoundProcessor.getProcessors().size());
        for (Processor processor : compoundProcessor.getOnFailureProcessors()) {
            if (processor instanceof PipelineProcessor) {
                PipelineProcessor pipelineProcessor = ((PipelineProcessor) processor);
                if (pipelinesSeen.add(pipelineProcessor) == false) {
                    throw new IllegalStateException("Cycle detected for pipeline: " + pipelineProcessor.getPipeline().getId());
                }
                onFailureProcessors.add(decorate(pipelineProcessor.getPipeline().getCompoundProcessor(), processorResultList,
                    pipelinesSeen));
                pipelinesSeen.remove(pipelineProcessor);
            } else if (processor instanceof CompoundProcessor) {
                onFailureProcessors.add(decorate((CompoundProcessor) processor, processorResultList, pipelinesSeen));
            if (processor instanceof CompoundProcessor) {
                onFailureProcessors.add(decorate((CompoundProcessor) processor, processorResultList));
            } else {
                onFailureProcessors.add(new TrackingResultProcessor(compoundProcessor.isIgnoreFailure(), processor, processorResultList));
            }
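The static "seen" set is dropped here because, per the comment above, it cannot handle conditionals around pipelines: a pipeline may legally reference itself as long as the guarding condition eventually turns false, which only a runtime check against a document copy can observe. A generic, non-Elasticsearch illustration of such condition-bounded recursion:

import java.util.HashMap;
import java.util.Map;

// Not Elasticsearch code: condition-bounded recursion that a static cycle
// detector would wrongly reject, but that terminates at runtime.
final class BoundedRecursionSketch {
    static void run(Map<String, Object> doc) {
        int depth = (int) doc.getOrDefault("depth", 0);
        if (depth < 3) {              // the "conditional" that eventually turns false
            doc.put("depth", depth + 1);
            run(doc);                 // the "pipeline" invoking itself
        }
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>();
        run(doc);
        System.out.println(doc.get("depth")); // prints 3
    }
}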
@@ -133,11 +133,7 @@ public class ExceptionSerializationTests extends ESTestCase {

            @Override
            public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
                Path next = pkgPrefix.resolve(dir.getFileName());
                if (ignore.contains(next)) {
                    return FileVisitResult.SKIP_SUBTREE;
                }
                pkgPrefix = next;
                pkgPrefix = pkgPrefix.resolve(dir.getFileName());
                return FileVisitResult.CONTINUE;
            }
@@ -53,7 +53,6 @@ import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;

public class NodeStatsTests extends ESTestCase {

    public void testSerialization() throws IOException {
        NodeStats nodeStats = createNodeStats();
        try (BytesStreamOutput out = new BytesStreamOutput()) {

@@ -271,14 +270,29 @@ public class NodeStatsTests extends ESTestCase {
                assertEquals(totalStats.getIngestCurrent(), deserializedIngestStats.getTotalStats().getIngestCurrent());
                assertEquals(totalStats.getIngestFailedCount(), deserializedIngestStats.getTotalStats().getIngestFailedCount());
                assertEquals(totalStats.getIngestTimeInMillis(), deserializedIngestStats.getTotalStats().getIngestTimeInMillis());
                assertEquals(ingestStats.getStatsPerPipeline().size(), deserializedIngestStats.getStatsPerPipeline().size());
                for (Map.Entry<String, IngestStats.Stats> entry : ingestStats.getStatsPerPipeline().entrySet()) {
                    IngestStats.Stats stats = entry.getValue();
                    IngestStats.Stats deserializedStats = deserializedIngestStats.getStatsPerPipeline().get(entry.getKey());
                    assertEquals(stats.getIngestFailedCount(), deserializedStats.getIngestFailedCount());
                    assertEquals(stats.getIngestTimeInMillis(), deserializedStats.getIngestTimeInMillis());
                    assertEquals(stats.getIngestCurrent(), deserializedStats.getIngestCurrent());
                    assertEquals(stats.getIngestCount(), deserializedStats.getIngestCount());
                assertEquals(ingestStats.getPipelineStats().size(), deserializedIngestStats.getPipelineStats().size());
                for (IngestStats.PipelineStat pipelineStat : ingestStats.getPipelineStats()) {
                    String pipelineId = pipelineStat.getPipelineId();
                    IngestStats.Stats deserializedPipelineStats =
                        getPipelineStats(deserializedIngestStats.getPipelineStats(), pipelineId);
                    assertEquals(pipelineStat.getStats().getIngestFailedCount(), deserializedPipelineStats.getIngestFailedCount());
                    assertEquals(pipelineStat.getStats().getIngestTimeInMillis(), deserializedPipelineStats.getIngestTimeInMillis());
                    assertEquals(pipelineStat.getStats().getIngestCurrent(), deserializedPipelineStats.getIngestCurrent());
                    assertEquals(pipelineStat.getStats().getIngestCount(), deserializedPipelineStats.getIngestCount());
                    List<IngestStats.ProcessorStat> processorStats = ingestStats.getProcessorStats().get(pipelineId);
                    // intentionally validating identical order
                    Iterator<IngestStats.ProcessorStat> it = deserializedIngestStats.getProcessorStats().get(pipelineId).iterator();
                    for (IngestStats.ProcessorStat processorStat : processorStats) {
                        IngestStats.ProcessorStat deserializedProcessorStat = it.next();
                        assertEquals(processorStat.getStats().getIngestFailedCount(),
                            deserializedProcessorStat.getStats().getIngestFailedCount());
                        assertEquals(processorStat.getStats().getIngestTimeInMillis(),
                            deserializedProcessorStat.getStats().getIngestTimeInMillis());
                        assertEquals(processorStat.getStats().getIngestCurrent(),
                            deserializedProcessorStat.getStats().getIngestCurrent());
                        assertEquals(processorStat.getStats().getIngestCount(), deserializedProcessorStat.getStats().getIngestCount());
                    }
                    assertFalse(it.hasNext());
                }
            }
            AdaptiveSelectionStats adaptiveStats = nodeStats.getAdaptiveSelectionStats();

@@ -429,14 +443,24 @@ public class NodeStatsTests extends ESTestCase {
        if (frequently()) {
            IngestStats.Stats totalStats = new IngestStats.Stats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(),
                randomNonNegativeLong());
            int numPipelines = randomIntBetween(0, 10);
            int numProcessors = randomIntBetween(0, 10);
            List<IngestStats.PipelineStat> ingestPipelineStats = new ArrayList<>(numPipelines);
            Map<String, List<IngestStats.ProcessorStat>> ingestProcessorStats = new HashMap<>(numPipelines);
            for (int i = 0; i < numPipelines; i++) {
                String pipelineId = randomAlphaOfLengthBetween(3, 10);
                ingestPipelineStats.add(new IngestStats.PipelineStat(pipelineId, new IngestStats.Stats
                    (randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong())));

            int numStatsPerPipeline = randomIntBetween(0, 10);
            Map<String, IngestStats.Stats> statsPerPipeline = new HashMap<>();
            for (int i = 0; i < numStatsPerPipeline; i++) {
                statsPerPipeline.put(randomAlphaOfLengthBetween(3, 10), new IngestStats.Stats(randomNonNegativeLong(),
                    randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()));
                List<IngestStats.ProcessorStat> processorPerPipeline = new ArrayList<>(numProcessors);
                for (int j = 0; j < numProcessors; j++) {
                    IngestStats.Stats processorStats = new IngestStats.Stats
                        (randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong());
                    processorPerPipeline.add(new IngestStats.ProcessorStat(randomAlphaOfLengthBetween(3, 10), processorStats));
                }
                ingestStats = new IngestStats(totalStats, statsPerPipeline);
                ingestProcessorStats.put(pipelineId, processorPerPipeline);
            }
            ingestStats = new IngestStats(totalStats, ingestPipelineStats, ingestProcessorStats);
        }
        AdaptiveSelectionStats adaptiveSelectionStats = null;
        if (frequently()) {

@@ -465,4 +489,8 @@ public class NodeStatsTests extends ESTestCase {
            fsInfo, transportStats, httpStats, allCircuitBreakerStats, scriptStats, discoveryStats,
            ingestStats, adaptiveSelectionStats);
    }

    private IngestStats.Stats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
        return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null);
    }
}
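The test follows the usual Writeable round-trip idiom: serialize, deserialize, then compare field by field. A condensed sketch of that idiom under the same stream classes the test uses, with the assertions elided:

// Sketch of the serialization round trip the assertions above verify.
try (BytesStreamOutput out = new BytesStreamOutput()) {
    ingestStats.writeTo(out);
    try (StreamInput in = out.bytes().streamInput()) {
        IngestStats deserialized = new IngestStats(in);
        // compare totals, then each PipelineStat and its ProcessorStats in order
    }
}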
@@ -25,7 +25,6 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.DateMathExpressionResolver;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

@@ -42,7 +41,7 @@ import static org.joda.time.DateTimeZone.UTC;

public class DateMathExpressionResolverTests extends ESTestCase {

    private final DateMathExpressionResolver expressionResolver = new DateMathExpressionResolver(Settings.EMPTY);
    private final DateMathExpressionResolver expressionResolver = new DateMathExpressionResolver();
    private final Context context = new Context(
        ClusterState.builder(new ClusterName("_name")).build(), IndicesOptions.strictExpand()
    );

@@ -118,37 +117,6 @@ public class DateMathExpressionResolverTests extends ESTestCase {
        assertThat(result.get(3), equalTo(".logstash-" + DateTimeFormat.forPattern("YYYY.MM").print(new DateTime(context.getStartTime(), UTC).withDayOfMonth(1))));
    }

    public void testExpression_CustomTimeZoneInSetting() throws Exception {
        DateTimeZone timeZone;
        int hoursOffset;
        int minutesOffset = 0;
        if (randomBoolean()) {
            hoursOffset = randomIntBetween(-12, 14);
            timeZone = DateTimeZone.forOffsetHours(hoursOffset);
        } else {
            hoursOffset = randomIntBetween(-11, 13);
            minutesOffset = randomIntBetween(0, 59);
            timeZone = DateTimeZone.forOffsetHoursMinutes(hoursOffset, minutesOffset);
        }
        DateTime now;
        if (hoursOffset >= 0) {
            // rounding to next day 00:00
            now = DateTime.now(UTC).plusHours(hoursOffset).plusMinutes(minutesOffset).withHourOfDay(0).withMinuteOfHour(0).withSecondOfMinute(0);
        } else {
            // rounding to today 00:00
            now = DateTime.now(UTC).withHourOfDay(0).withMinuteOfHour(0).withSecondOfMinute(0);
        }
        Settings settings = Settings.builder()
            .put("date_math_expression_resolver.default_time_zone", timeZone.getID())
            .build();
        DateMathExpressionResolver expressionResolver = new DateMathExpressionResolver(settings);
        Context context = new Context(this.context.getState(), this.context.getOptions(), now.getMillis());
        List<String> results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{YYYY.MM.dd}}>"));
        assertThat(results.size(), equalTo(1));
        logger.info("timezone: [{}], now [{}], name: [{}]", timeZone, now, results.get(0));
        assertThat(results.get(0), equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(now.withZone(timeZone))));
    }

    public void testExpression_CustomTimeZoneInIndexName() throws Exception {
        DateTimeZone timeZone;
        int hoursOffset;
@@ -51,6 +51,7 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidAliasNameException;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
import org.mockito.ArgumentCaptor;

@@ -82,7 +83,7 @@ import static org.mockito.Mockito.when;
public class IndexCreationTaskTests extends ESTestCase {

    private final IndicesService indicesService = mock(IndicesService.class);
    private final AliasValidator aliasValidator = mock(AliasValidator.class);
    private final AliasValidator aliasValidator = new AliasValidator(Settings.EMPTY);
    private final NamedXContentRegistry xContentRegistry = mock(NamedXContentRegistry.class);
    private final CreateIndexClusterStateUpdateRequest request = mock(CreateIndexClusterStateUpdateRequest.class);
    private final Logger logger = mock(Logger.class);

@@ -149,6 +150,12 @@ public class IndexCreationTaskTests extends ESTestCase {
        assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1"));
    }

    public void testInvalidAliasName() throws Exception {
        final String[] invalidAliasNames = new String[] { "-alias1", "+alias2", "_alias3", "a#lias", "al:ias", ".", ".." };
        setupRequestAlias(new Alias(randomFrom(invalidAliasNames)));
        expectThrows(InvalidAliasNameException.class, this::executeTask);
    }

    public void testRequestDataHavePriorityOverTemplateData() throws Exception {
        final CompressedXContent tplMapping = createMapping("text");
        final CompressedXContent reqMapping = createMapping("keyword");
@@ -66,6 +66,14 @@ public class MetaDataTests extends ESTestCase {
            assertThat(aliases.size(), equalTo(0));
        }
        {
            final GetAliasesRequest request;
            if (randomBoolean()) {
                request = new GetAliasesRequest();
            } else {
                request = new GetAliasesRequest(randomFrom("alias1", "alias2"));
                // replacing with empty aliases behaves as if aliases were unspecified at request building
                request.replaceAliases(Strings.EMPTY_ARRAY);
            }
            ImmutableOpenMap<String, List<AliasMetaData>> aliases = metaData.findAliases(new GetAliasesRequest(), new String[]{"index"});
            assertThat(aliases.size(), equalTo(1));
            List<AliasMetaData> aliasMetaDataList = aliases.get("index");

@@ -73,12 +81,6 @@ public class MetaDataTests extends ESTestCase {
            assertThat(aliasMetaDataList.get(0).alias(), equalTo("alias1"));
            assertThat(aliasMetaDataList.get(1).alias(), equalTo("alias2"));
        }
        {
            GetAliasesRequest getAliasesRequest = new GetAliasesRequest("alias1");
            getAliasesRequest.replaceAliases(Strings.EMPTY_ARRAY);
            ImmutableOpenMap<String, List<AliasMetaData>> aliases = metaData.findAliases(getAliasesRequest, new String[]{"index"});
            assertThat(aliases.size(), equalTo(0));
        }
        {
            ImmutableOpenMap<String, List<AliasMetaData>> aliases =
                metaData.findAliases(new GetAliasesRequest("alias*"), new String[]{"index"});
@@ -156,7 +156,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " }\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.DEFAULT);
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.MILES);
    }

    public void testParsingAndToQuery2() throws IOException {

@@ -166,7 +166,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " \"" + GEO_POINT_FIELD_NAME + "\":[-70, 40]\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.DEFAULT);
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.MILES);
    }

    public void testParsingAndToQuery3() throws IOException {

@@ -176,7 +176,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " \"" + GEO_POINT_FIELD_NAME + "\":\"40, -70\"\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.DEFAULT);
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.MILES);
    }

    public void testParsingAndToQuery4() throws IOException {

@@ -186,7 +186,8 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " \"" + GEO_POINT_FIELD_NAME + "\":\"drn5x1g8cu2y\"\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.DEFAULT);
        GeoPoint geoPoint = GeoPoint.fromGeohash("drn5x1g8cu2y");
        assertGeoDistanceRangeQuery(query, geoPoint.getLat(), geoPoint.getLon(), 12, DistanceUnit.MILES);
    }

    public void testParsingAndToQuery5() throws IOException {

@@ -200,7 +201,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " }\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.DEFAULT);
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.MILES);
    }

    public void testParsingAndToQuery6() throws IOException {

@@ -214,7 +215,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " }\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.DEFAULT);
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.MILES);
    }

    public void testParsingAndToQuery7() throws IOException {

@@ -227,7 +228,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " }\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 0.012, DistanceUnit.DEFAULT);
        assertGeoDistanceRangeQuery(query, 40, -70, 19.312128, DistanceUnit.DEFAULT);
    }

    public void testParsingAndToQuery8() throws IOException {

@@ -240,7 +241,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " }\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.KILOMETERS);
        assertGeoDistanceRangeQuery(query, 40, -70, 19.312128, DistanceUnit.DEFAULT);
    }

    public void testParsingAndToQuery9() throws IOException {

@@ -254,7 +255,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " }\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.DEFAULT);
        assertGeoDistanceRangeQuery(query, 40, -70, 19.312128, DistanceUnit.KILOMETERS);
    }

    public void testParsingAndToQuery10() throws IOException {

@@ -268,7 +269,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " }\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.DEFAULT);
        assertGeoDistanceRangeQuery(query, 40, -70, 19.312128, DistanceUnit.KILOMETERS);
    }

    public void testParsingAndToQuery11() throws IOException {

@@ -281,7 +282,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " }\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.DEFAULT);
        assertGeoDistanceRangeQuery(query, 40, -70, 19.312128, DistanceUnit.KILOMETERS);
    }

    public void testParsingAndToQuery12() throws IOException {

@@ -295,13 +296,16 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
            " }\n" +
            " }\n" +
            "}\n";
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.DEFAULT);
        assertGeoDistanceRangeQuery(query, 40, -70, 12, DistanceUnit.MILES);
    }

    private void assertGeoDistanceRangeQuery(String query, double lat, double lon, double distance, DistanceUnit distanceUnit)
        throws IOException {
        parseQuery(query).toQuery(createShardContext());
        // TODO: what can we check? See https://github.com/elastic/elasticsearch/issues/34043
        Query parsedQuery = parseQuery(query).toQuery(createShardContext());
        // The parsedQuery contains IndexOrDocValuesQuery, which wraps LatLonPointDistanceQuery which in turn has default visibility,
        // so we cannot access its fields directly to check and have to use toString() here instead.
        assertEquals(parsedQuery.toString(),
            "mapped_geo_point:" + lat + "," + lon + " +/- " + distanceUnit.toMeters(distance) + " meters");
    }

    public void testFromJson() throws IOException {
@@ -27,11 +27,17 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class CompoundProcessorTests extends ESTestCase {
    private IngestDocument ingestDocument;

@@ -49,18 +55,29 @@ public class CompoundProcessorTests extends ESTestCase {
    }

    public void testSingleProcessor() throws Exception {
        TestProcessor processor = new TestProcessor(ingestDocument -> {});
        CompoundProcessor compoundProcessor = new CompoundProcessor(processor);
        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L, TimeUnit.MILLISECONDS.toNanos(1));
        TestProcessor processor = new TestProcessor(ingestDocument -> {
            assertStats(0, ingestDocument.getFieldValue("compoundProcessor", CompoundProcessor.class), 1, 0, 0, 0);
        });
        CompoundProcessor compoundProcessor = new CompoundProcessor(relativeTimeProvider, processor);
        ingestDocument.setFieldValue("compoundProcessor", compoundProcessor); // ugly hack to assert current count = 1
        assertThat(compoundProcessor.getProcessors().size(), equalTo(1));
        assertThat(compoundProcessor.getProcessors().get(0), sameInstance(processor));
        assertThat(compoundProcessor.getProcessors().get(0), sameInstance(processor));
        assertThat(compoundProcessor.getOnFailureProcessors().isEmpty(), is(true));
        compoundProcessor.execute(ingestDocument);
        verify(relativeTimeProvider, times(2)).getAsLong();
        assertThat(processor.getInvokedCounter(), equalTo(1));
        assertStats(compoundProcessor, 1, 0, 1);
    }

    public void testSingleProcessorWithException() throws Exception {
        TestProcessor processor = new TestProcessor(ingestDocument -> {throw new RuntimeException("error");});
        CompoundProcessor compoundProcessor = new CompoundProcessor(processor);
        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L);
        CompoundProcessor compoundProcessor = new CompoundProcessor(relativeTimeProvider, processor);
        assertThat(compoundProcessor.getProcessors().size(), equalTo(1));
        assertThat(compoundProcessor.getProcessors().get(0), sameInstance(processor));
        assertThat(compoundProcessor.getOnFailureProcessors().isEmpty(), is(true));

@@ -71,15 +88,22 @@ public class CompoundProcessorTests extends ESTestCase {
            assertThat(e.getRootCause().getMessage(), equalTo("error"));
        }
        assertThat(processor.getInvokedCounter(), equalTo(1));
        assertStats(compoundProcessor, 1, 1, 0);
    }

    public void testIgnoreFailure() throws Exception {
        TestProcessor processor1 = new TestProcessor(ingestDocument -> {throw new RuntimeException("error");});
        TestProcessor processor2 = new TestProcessor(ingestDocument -> {ingestDocument.setFieldValue("field", "value");});
        CompoundProcessor compoundProcessor = new CompoundProcessor(true, Arrays.asList(processor1, processor2), Collections.emptyList());
        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L);
        CompoundProcessor compoundProcessor =
            new CompoundProcessor(true, Arrays.asList(processor1, processor2), Collections.emptyList(), relativeTimeProvider);
        compoundProcessor.execute(ingestDocument);
        assertThat(processor1.getInvokedCounter(), equalTo(1));
        assertStats(0, compoundProcessor, 0, 1, 1, 0);
        assertThat(processor2.getInvokedCounter(), equalTo(1));
        assertStats(1, compoundProcessor, 0, 1, 0, 0);
        assertThat(ingestDocument.getFieldValue("field", String.class), equalTo("value"));
    }

@@ -93,11 +117,15 @@ public class CompoundProcessorTests extends ESTestCase {
            assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo("id"));
        });

        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L, TimeUnit.MILLISECONDS.toNanos(1));
        CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(processor1),
            Collections.singletonList(processor2));
            Collections.singletonList(processor2), relativeTimeProvider);
        compoundProcessor.execute(ingestDocument);
        verify(relativeTimeProvider, times(2)).getAsLong();

        assertThat(processor1.getInvokedCounter(), equalTo(1));
        assertStats(compoundProcessor, 1, 1, 1);
        assertThat(processor2.getInvokedCounter(), equalTo(1));
    }

@@ -118,14 +146,17 @@ public class CompoundProcessorTests extends ESTestCase {
            assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo("second"));
            assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo("id2"));
        });
        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L);
        CompoundProcessor compoundOnFailProcessor = new CompoundProcessor(false, Collections.singletonList(processorToFail),
            Collections.singletonList(lastProcessor));
            Collections.singletonList(lastProcessor), relativeTimeProvider);
        CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(processor),
            Collections.singletonList(compoundOnFailProcessor));
            Collections.singletonList(compoundOnFailProcessor), relativeTimeProvider);
        compoundProcessor.execute(ingestDocument);

        assertThat(processorToFail.getInvokedCounter(), equalTo(1));
        assertThat(lastProcessor.getInvokedCounter(), equalTo(1));
        assertStats(compoundProcessor, 1, 1, 0);
    }

    public void testCompoundProcessorExceptionFailWithoutOnFailure() throws Exception {

@@ -137,15 +168,18 @@ public class CompoundProcessorTests extends ESTestCase {
            assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TYPE_FIELD), equalTo("first"));
            assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo("id1"));
        });
        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L);

        CompoundProcessor failCompoundProcessor = new CompoundProcessor(firstProcessor);
        CompoundProcessor failCompoundProcessor = new CompoundProcessor(relativeTimeProvider, firstProcessor);

        CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(failCompoundProcessor),
            Collections.singletonList(secondProcessor));
            Collections.singletonList(secondProcessor), relativeTimeProvider);
        compoundProcessor.execute(ingestDocument);

        assertThat(firstProcessor.getInvokedCounter(), equalTo(1));
        assertThat(secondProcessor.getInvokedCounter(), equalTo(1));
        assertStats(compoundProcessor, 1, 1, 0);
    }

    public void testCompoundProcessorExceptionFail() throws Exception {

@@ -160,15 +194,18 @@ public class CompoundProcessorTests extends ESTestCase {
            assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo("tag_fail"));
        });

        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L);
        CompoundProcessor failCompoundProcessor = new CompoundProcessor(false, Collections.singletonList(firstProcessor),
            Collections.singletonList(failProcessor));
            Collections.singletonList(failProcessor), relativeTimeProvider);

        CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(failCompoundProcessor),
            Collections.singletonList(secondProcessor));
            Collections.singletonList(secondProcessor), relativeTimeProvider);
        compoundProcessor.execute(ingestDocument);

        assertThat(firstProcessor.getInvokedCounter(), equalTo(1));
        assertThat(secondProcessor.getInvokedCounter(), equalTo(1));
        assertStats(compoundProcessor, 1, 1, 0);
    }

    public void testCompoundProcessorExceptionFailInOnFailure() throws Exception {

@@ -183,27 +220,44 @@ public class CompoundProcessorTests extends ESTestCase {
            assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD), equalTo("tag_fail"));
        });

        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L);
        CompoundProcessor failCompoundProcessor = new CompoundProcessor(false, Collections.singletonList(firstProcessor),
            Collections.singletonList(new CompoundProcessor(failProcessor)));
            Collections.singletonList(new CompoundProcessor(relativeTimeProvider, failProcessor)));

        CompoundProcessor compoundProcessor = new CompoundProcessor(false, Collections.singletonList(failCompoundProcessor),
            Collections.singletonList(secondProcessor));
            Collections.singletonList(secondProcessor), relativeTimeProvider);
        compoundProcessor.execute(ingestDocument);

        assertThat(firstProcessor.getInvokedCounter(), equalTo(1));
        assertThat(secondProcessor.getInvokedCounter(), equalTo(1));
        assertStats(compoundProcessor, 1, 1, 0);
    }

    public void testBreakOnFailure() throws Exception {
        TestProcessor firstProcessor = new TestProcessor("id1", "first", ingestDocument -> {throw new RuntimeException("error1");});
        TestProcessor secondProcessor = new TestProcessor("id2", "second", ingestDocument -> {throw new RuntimeException("error2");});
        TestProcessor onFailureProcessor = new TestProcessor("id2", "on_failure", ingestDocument -> {});
        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L);
        CompoundProcessor pipeline = new CompoundProcessor(false, Arrays.asList(firstProcessor, secondProcessor),
            Collections.singletonList(onFailureProcessor));
            Collections.singletonList(onFailureProcessor), relativeTimeProvider);
        pipeline.execute(ingestDocument);
        assertThat(firstProcessor.getInvokedCounter(), equalTo(1));
        assertThat(secondProcessor.getInvokedCounter(), equalTo(0));
        assertThat(onFailureProcessor.getInvokedCounter(), equalTo(1));
        assertStats(pipeline, 1, 1, 0);
    }

    private void assertStats(CompoundProcessor compoundProcessor, long count, long failed, long time) {
        assertStats(0, compoundProcessor, 0L, count, failed, time);
    }

    private void assertStats(int processor, CompoundProcessor compoundProcessor, long current, long count, long failed, long time) {
        IngestStats.Stats stats = compoundProcessor.getProcessorsWithMetrics().get(processor).v2().createStats();
        assertThat(stats.getIngestCount(), equalTo(count));
        assertThat(stats.getIngestCurrent(), equalTo(current));
        assertThat(stats.getIngestFailedCount(), equalTo(failed));
        assertThat(stats.getIngestTimeInMillis(), equalTo(time));
    }
}
@@ -33,12 +33,18 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.LongSupplier;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.core.Is.is;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class ConditionalProcessorTests extends ESTestCase {

@@ -60,6 +66,8 @@ public class ConditionalProcessorTests extends ESTestCase {
            new HashMap<>(ScriptModule.CORE_CONTEXTS)
        );
        Map<String, Object> document = new HashMap<>();
        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L, TimeUnit.MILLISECONDS.toNanos(1), 0L, TimeUnit.MILLISECONDS.toNanos(2));
        ConditionalProcessor processor = new ConditionalProcessor(
            randomAlphaOfLength(10),
            new Script(
@@ -67,7 +75,10 @@ public class ConditionalProcessorTests extends ESTestCase {
                scriptName, Collections.emptyMap()), scriptService,
            new Processor() {
                @Override
                public IngestDocument execute(final IngestDocument ingestDocument) throws Exception {
                public IngestDocument execute(final IngestDocument ingestDocument) {
                    if (ingestDocument.hasField("error")) {
                        throw new RuntimeException("error");
                    }
                    ingestDocument.setFieldValue("foo", "bar");
                    return ingestDocument;
                }
@@ -81,20 +92,37 @@ public class ConditionalProcessorTests extends ESTestCase {
                public String getTag() {
                    return null;
                }
            });

        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
        ingestDocument.setFieldValue(conditionalField, trueValue);
        processor.execute(ingestDocument);
        assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(trueValue));
        assertThat(ingestDocument.getSourceAndMetadata().get("foo"), is("bar"));
            }, relativeTimeProvider);

        // false: the wrapped processor is never invoked, so the metrics are never incremented
        String falseValue = "falsy";
        ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
        ingestDocument.setFieldValue(conditionalField, falseValue);
        processor.execute(ingestDocument);
        assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(falseValue));
        assertThat(ingestDocument.getSourceAndMetadata(), not(hasKey("foo")));
        assertStats(processor, 0, 0, 0);

        ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
        ingestDocument.setFieldValue(conditionalField, falseValue);
        ingestDocument.setFieldValue("error", true);
        processor.execute(ingestDocument);
        assertStats(processor, 0, 0, 0);

        // true: the wrapped processor is always invoked and the metrics are incremented
        ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
        ingestDocument.setFieldValue(conditionalField, trueValue);
        processor.execute(ingestDocument);
        assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(trueValue));
        assertThat(ingestDocument.getSourceAndMetadata().get("foo"), is("bar"));
        assertStats(processor, 1, 0, 1);

        ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
        ingestDocument.setFieldValue(conditionalField, trueValue);
        ingestDocument.setFieldValue("error", true);
        IngestDocument finalIngestDocument = ingestDocument;
        expectThrows(RuntimeException.class, () -> processor.execute(finalIngestDocument));
        assertStats(processor, 2, 1, 2);
    }

    @SuppressWarnings("unchecked")
@@ -141,5 +169,14 @@ public class ConditionalProcessorTests extends ESTestCase {
        Exception e = expectedException.get();
        assertThat(e, instanceOf(UnsupportedOperationException.class));
        assertEquals("Mutating ingest documents in conditionals is not supported", e.getMessage());
        assertStats(processor, 0, 0, 0);
    }

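    // Reads the conditional processor's own IngestMetric; the expected time is treated as a lower bound.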
    private static void assertStats(ConditionalProcessor conditionalProcessor, long count, long failed, long time) {
        IngestStats.Stats stats = conditionalProcessor.getMetric().createStats();
        assertThat(stats.getIngestCount(), equalTo(count));
        assertThat(stats.getIngestCurrent(), equalTo(0L));
        assertThat(stats.getIngestFailedCount(), equalTo(failed));
        assertThat(stats.getIngestTimeInMillis(), greaterThanOrEqualTo(time));
    }
}

@@ -63,6 +63,7 @@ import java.util.function.Consumer;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
@@ -746,16 +747,23 @@ public class IngestServiceTests extends ESTestCase {
        verify(completionHandler, times(1)).accept(null);
    }

    public void testStats() {
    public void testStats() throws Exception {
        final Processor processor = mock(Processor.class);
        IngestService ingestService = createWithProcessors(Collections.singletonMap(
            "mock", (factories, tag, config) -> processor));
        final Processor processorFailure = mock(Processor.class);
        when(processor.getType()).thenReturn("mock");
        when(processor.getTag()).thenReturn("mockTag");
        when(processorFailure.getType()).thenReturn("failure-mock");
        // avoid returning null and dropping the document
        when(processor.execute(any(IngestDocument.class))).thenReturn(RandomDocumentPicks.randomIngestDocument(random()));
        when(processorFailure.execute(any(IngestDocument.class))).thenThrow(new RuntimeException("error"));
        Map<String, Processor.Factory> map = new HashMap<>(2);
        map.put("mock", (factories, tag, config) -> processor);
        map.put("failure-mock", (factories, tag, config) -> processorFailure);
        IngestService ingestService = createWithProcessors(map);

        final IngestStats initialStats = ingestService.stats();
        assertThat(initialStats.getStatsPerPipeline().size(), equalTo(0));
        assertThat(initialStats.getTotalStats().getIngestCount(), equalTo(0L));
        assertThat(initialStats.getTotalStats().getIngestCurrent(), equalTo(0L));
        assertThat(initialStats.getTotalStats().getIngestFailedCount(), equalTo(0L));
        assertThat(initialStats.getTotalStats().getIngestTimeInMillis(), equalTo(0L));
        assertThat(initialStats.getPipelineStats().size(), equalTo(0));
        assertStats(initialStats.getTotalStats(), 0, 0, 0);

        PutPipelineRequest putRequest = new PutPipelineRequest("_id1",
            new BytesArray("{\"processors\": [{\"mock\" : {}}]}"), XContentType.JSON);
@@ -769,7 +777,6 @@ public class IngestServiceTests extends ESTestCase {
        clusterState = IngestService.innerPut(putRequest, clusterState);
        ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState));

        @SuppressWarnings("unchecked") final BiConsumer<IndexRequest, Exception> failureHandler = mock(BiConsumer.class);
        @SuppressWarnings("unchecked") final Consumer<Exception> completionHandler = mock(Consumer.class);

@@ -778,18 +785,33 @@ public class IngestServiceTests extends ESTestCase {
        indexRequest.source(randomAlphaOfLength(10), randomAlphaOfLength(10));
        ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {});
        final IngestStats afterFirstRequestStats = ingestService.stats();
        assertThat(afterFirstRequestStats.getStatsPerPipeline().size(), equalTo(2));
        assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L));
        assertThat(afterFirstRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(0L));
        assertThat(afterFirstRequestStats.getTotalStats().getIngestCount(), equalTo(1L));
        assertThat(afterFirstRequestStats.getPipelineStats().size(), equalTo(2));

        afterFirstRequestStats.getProcessorStats().get("_id1").forEach(p -> assertEquals(p.getName(), "mock:mockTag"));
        afterFirstRequestStats.getProcessorStats().get("_id2").forEach(p -> assertEquals(p.getName(), "mock:mockTag"));

        // total
        assertStats(afterFirstRequestStats.getTotalStats(), 1, 0, 0);
        // pipeline
        assertPipelineStats(afterFirstRequestStats.getPipelineStats(), "_id1", 1, 0, 0);
        assertPipelineStats(afterFirstRequestStats.getPipelineStats(), "_id2", 0, 0, 0);
        // processor
        assertProcessorStats(0, afterFirstRequestStats, "_id1", 1, 0, 0);
        assertProcessorStats(0, afterFirstRequestStats, "_id2", 0, 0, 0);

        indexRequest.setPipeline("_id2");
        ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {});
        final IngestStats afterSecondRequestStats = ingestService.stats();
        assertThat(afterSecondRequestStats.getStatsPerPipeline().size(), equalTo(2));
        assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(1L));
        assertThat(afterSecondRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L));
        assertThat(afterSecondRequestStats.getTotalStats().getIngestCount(), equalTo(2L));
        assertThat(afterSecondRequestStats.getPipelineStats().size(), equalTo(2));
        // total
        assertStats(afterSecondRequestStats.getTotalStats(), 2, 0, 0);
        // pipeline
        assertPipelineStats(afterSecondRequestStats.getPipelineStats(), "_id1", 1, 0, 0);
        assertPipelineStats(afterSecondRequestStats.getPipelineStats(), "_id2", 1, 0, 0);
        // processor
        assertProcessorStats(0, afterSecondRequestStats, "_id1", 1, 0, 0);
        assertProcessorStats(0, afterSecondRequestStats, "_id2", 1, 0, 0);

        // update the cluster state and ensure that the new stats are added to the old stats
        putRequest = new PutPipelineRequest("_id1",
@@ -800,13 +822,66 @@ public class IngestServiceTests extends ESTestCase {
        indexRequest.setPipeline("_id1");
        ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {});
        final IngestStats afterThirdRequestStats = ingestService.stats();
        assertThat(afterThirdRequestStats.getStatsPerPipeline().size(), equalTo(2));
        assertThat(afterThirdRequestStats.getStatsPerPipeline().get("_id1").getIngestCount(), equalTo(2L));
        assertThat(afterThirdRequestStats.getStatsPerPipeline().get("_id2").getIngestCount(), equalTo(1L));
        assertThat(afterThirdRequestStats.getTotalStats().getIngestCount(), equalTo(3L));
        assertThat(afterThirdRequestStats.getPipelineStats().size(), equalTo(2));
        // total
        assertStats(afterThirdRequestStats.getTotalStats(), 3, 0, 0);
        // pipeline
        assertPipelineStats(afterThirdRequestStats.getPipelineStats(), "_id1", 2, 0, 0);
        assertPipelineStats(afterThirdRequestStats.getPipelineStats(), "_id2", 1, 0, 0);
        // The number of processors for the "_id1" pipeline changed, so the per-processor metrics are not carried forward.
        // This is due to the parallel arrays used to identify which metrics to carry forward. Without unique ids or semantic
        // equality for each processor, parallel arrays are the best option for carrying metrics forward across pipeline
        // changes. However, in some cases, like this one, it may not be readily obvious why the metrics were not carried forward.
        assertProcessorStats(0, afterThirdRequestStats, "_id1", 1, 0, 0);
        assertProcessorStats(1, afterThirdRequestStats, "_id1", 1, 0, 0);
        assertProcessorStats(0, afterThirdRequestStats, "_id2", 1, 0, 0);

        // test a failure, and that the processor stats are combined with the old stats
        putRequest = new PutPipelineRequest("_id1",
            new BytesArray("{\"processors\": [{\"failure-mock\" : { \"on_failure\": [{\"mock\" : {}}]}}, {\"mock\" : {}}]}"),
            XContentType.JSON);
        previousClusterState = clusterState;
        clusterState = IngestService.innerPut(putRequest, clusterState);
        ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState));
        indexRequest.setPipeline("_id1");
        ingestService.executeBulkRequest(Collections.singletonList(indexRequest), failureHandler, completionHandler, indexReq -> {});
        final IngestStats afterFourthRequestStats = ingestService.stats();
        assertThat(afterFourthRequestStats.getPipelineStats().size(), equalTo(2));
        // total
        assertStats(afterFourthRequestStats.getTotalStats(), 4, 0, 0);
        // pipeline
        assertPipelineStats(afterFourthRequestStats.getPipelineStats(), "_id1", 3, 0, 0);
        assertPipelineStats(afterFourthRequestStats.getPipelineStats(), "_id2", 1, 0, 0);
        // processor
        assertProcessorStats(0, afterFourthRequestStats, "_id1", 1, 1, 0); // not carried forward since the type changed
        assertProcessorStats(1, afterFourthRequestStats, "_id1", 2, 0, 0); // carried forward and added to the old stats
        assertProcessorStats(0, afterFourthRequestStats, "_id2", 1, 0, 0);
    }

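    // getProcessorName renders a processor as "type:tag"; pipeline processors additionally embed the target
    // pipeline name, yielding "type:pipelineName:tag".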
    public void testStatName() {
        Processor processor = mock(Processor.class);
        String name = randomAlphaOfLength(10);
        when(processor.getType()).thenReturn(name);
        assertThat(IngestService.getProcessorName(processor), equalTo(name));
        String tag = randomAlphaOfLength(10);
        when(processor.getTag()).thenReturn(tag);
        assertThat(IngestService.getProcessorName(processor), equalTo(name + ":" + tag));

        ConditionalProcessor conditionalProcessor = mock(ConditionalProcessor.class);
        when(conditionalProcessor.getProcessor()).thenReturn(processor);
        assertThat(IngestService.getProcessorName(conditionalProcessor), equalTo(name + ":" + tag));

        PipelineProcessor pipelineProcessor = mock(PipelineProcessor.class);
        String pipelineName = randomAlphaOfLength(10);
        when(pipelineProcessor.getPipelineName()).thenReturn(pipelineName);
        name = PipelineProcessor.TYPE;
        when(pipelineProcessor.getType()).thenReturn(name);
        assertThat(IngestService.getProcessorName(pipelineProcessor), equalTo(name + ":" + pipelineName));
        when(pipelineProcessor.getTag()).thenReturn(tag);
        assertThat(IngestService.getProcessorName(pipelineProcessor), equalTo(name + ":" + pipelineName + ":" + tag));
    }

    public void testExecuteWithDrop() {
        Map<String, Processor.Factory> factories = new HashMap<>();
        factories.put("drop", new DropProcessor.Factory());
@@ -935,4 +1010,23 @@ public class IngestServiceTests extends ESTestCase {
            return false;
        }
    }

    private void assertProcessorStats(int processor, IngestStats stats, String pipelineId, long count, long failed, long time) {
        assertStats(stats.getProcessorStats().get(pipelineId).get(processor).getStats(), count, failed, time);
    }

    private void assertPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String pipelineId, long count, long failed, long time) {
        assertStats(getPipelineStats(pipelineStats, pipelineId), count, failed, time);
    }

    private void assertStats(IngestStats.Stats stats, long count, long failed, long time) {
        assertThat(stats.getIngestCount(), equalTo(count));
        assertThat(stats.getIngestCurrent(), equalTo(0L));
        assertThat(stats.getIngestFailedCount(), equalTo(failed));
        assertThat(stats.getIngestTimeInMillis(), greaterThanOrEqualTo(time));
    }

    private IngestStats.Stats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
        return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null);
    }
}

@@ -19,44 +19,70 @@

package org.elasticsearch.ingest;

import org.elasticsearch.Version;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;

import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class IngestStatsTests extends ESTestCase {

    public void testSerialization() throws IOException {
        IngestStats.Stats total = new IngestStats.Stats(5, 10, 20, 30);
        IngestStats.Stats foo = new IngestStats.Stats(50, 100, 200, 300);
        IngestStats ingestStats = new IngestStats(total, Collections.singletonMap("foo", foo));
        IngestStats serialize = serialize(ingestStats);
        assertNotSame(serialize, ingestStats);
        assertNotSame(serialize.getTotalStats(), total);
        assertEquals(total.getIngestCount(), serialize.getTotalStats().getIngestCount());
        assertEquals(total.getIngestFailedCount(), serialize.getTotalStats().getIngestFailedCount());
        assertEquals(total.getIngestTimeInMillis(), serialize.getTotalStats().getIngestTimeInMillis());
        assertEquals(total.getIngestCurrent(), serialize.getTotalStats().getIngestCurrent());
        IngestStats.Stats totalStats = new IngestStats.Stats(50, 100, 200, 300);
        List<IngestStats.PipelineStat> pipelineStats = createPipelineStats();
        Map<String, List<IngestStats.ProcessorStat>> processorStats = createProcessorStats(pipelineStats);
        IngestStats ingestStats = new IngestStats(totalStats, pipelineStats, processorStats);
        IngestStats serializedStats = serialize(ingestStats);
        assertIngestStats(ingestStats, serializedStats, true);
    }

        assertEquals(ingestStats.getStatsPerPipeline().size(), 1);
        assertTrue(ingestStats.getStatsPerPipeline().containsKey("foo"));
    public void testReadLegacyStream() throws IOException {
        IngestStats.Stats totalStats = new IngestStats.Stats(50, 100, 200, 300);
        List<IngestStats.PipelineStat> pipelineStats = createPipelineStats();

        Map<String, IngestStats.Stats> left = ingestStats.getStatsPerPipeline();
        Map<String, IngestStats.Stats> right = serialize.getStatsPerPipeline();
        // legacy output logic
        BytesStreamOutput out = new BytesStreamOutput();
        out.setVersion(VersionUtils.getPreviousVersion(Version.V_6_5_0));
        totalStats.writeTo(out);
        out.writeVInt(pipelineStats.size());
        for (IngestStats.PipelineStat pipelineStat : pipelineStats) {
            out.writeString(pipelineStat.getPipelineId());
            pipelineStat.getStats().writeTo(out);
        }

        assertEquals(right.size(), 1);
        assertTrue(right.containsKey("foo"));
        assertEquals(left.size(), 1);
        assertTrue(left.containsKey("foo"));
        IngestStats.Stats leftStats = left.get("foo");
        IngestStats.Stats rightStats = right.get("foo");
        assertEquals(leftStats.getIngestCount(), rightStats.getIngestCount());
        assertEquals(leftStats.getIngestFailedCount(), rightStats.getIngestFailedCount());
        assertEquals(leftStats.getIngestTimeInMillis(), rightStats.getIngestTimeInMillis());
        assertEquals(leftStats.getIngestCurrent(), rightStats.getIngestCurrent());
        StreamInput in = out.bytes().streamInput();
        in.setVersion(VersionUtils.getPreviousVersion(Version.V_6_5_0));
        IngestStats serializedStats = new IngestStats(in);
        IngestStats expectedStats = new IngestStats(totalStats, pipelineStats, Collections.emptyMap());
        assertIngestStats(expectedStats, serializedStats, false);
    }

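    // Fixtures: three pipelines with distinct stats; processor stats are created for the first two pipelines only.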
    private List<IngestStats.PipelineStat> createPipelineStats() {
        IngestStats.PipelineStat pipeline1Stats = new IngestStats.PipelineStat("pipeline1", new IngestStats.Stats(3, 3, 3, 3));
        IngestStats.PipelineStat pipeline2Stats = new IngestStats.PipelineStat("pipeline2", new IngestStats.Stats(47, 97, 197, 297));
        IngestStats.PipelineStat pipeline3Stats = new IngestStats.PipelineStat("pipeline3", new IngestStats.Stats(0, 0, 0, 0));
        return Stream.of(pipeline1Stats, pipeline2Stats, pipeline3Stats).collect(Collectors.toList());
    }

    private Map<String, List<IngestStats.ProcessorStat>> createProcessorStats(List<IngestStats.PipelineStat> pipelineStats) {
        assert (pipelineStats.size() >= 2);
        IngestStats.ProcessorStat processor1Stat = new IngestStats.ProcessorStat("processor1", new IngestStats.Stats(1, 1, 1, 1));
        IngestStats.ProcessorStat processor2Stat = new IngestStats.ProcessorStat("processor2", new IngestStats.Stats(2, 2, 2, 2));
        IngestStats.ProcessorStat processor3Stat = new IngestStats.ProcessorStat("processor3", new IngestStats.Stats(47, 97, 197, 297));
        // pipeline1 -> processor1, processor2; pipeline2 -> processor3
        return MapBuilder.<String, List<IngestStats.ProcessorStat>>newMapBuilder()
            .put(pipelineStats.get(0).getPipelineId(), Stream.of(processor1Stat, processor2Stat).collect(Collectors.toList()))
            .put(pipelineStats.get(1).getPipelineId(), Collections.singletonList(processor3Stat))
            .map();
    }

    private IngestStats serialize(IngestStats stats) throws IOException {
@@ -65,4 +91,48 @@ public class IngestStatsTests extends ESTestCase {
        StreamInput in = out.bytes().streamInput();
        return new IngestStats(in);
    }

    private void assertIngestStats(IngestStats ingestStats, IngestStats serializedStats, boolean expectProcessors) {
        assertNotSame(ingestStats, serializedStats);
        assertNotSame(ingestStats.getTotalStats(), serializedStats.getTotalStats());
        assertNotSame(ingestStats.getPipelineStats(), serializedStats.getPipelineStats());
        assertNotSame(ingestStats.getProcessorStats(), serializedStats.getProcessorStats());

        assertStats(ingestStats.getTotalStats(), serializedStats.getTotalStats());
        assertEquals(ingestStats.getPipelineStats().size(), serializedStats.getPipelineStats().size());

        for (IngestStats.PipelineStat serializedPipelineStat : serializedStats.getPipelineStats()) {
            assertStats(getPipelineStats(ingestStats.getPipelineStats(), serializedPipelineStat.getPipelineId()),
                serializedPipelineStat.getStats());
            List<IngestStats.ProcessorStat> serializedProcessorStats =
                serializedStats.getProcessorStats().get(serializedPipelineStat.getPipelineId());
            List<IngestStats.ProcessorStat> processorStat = ingestStats.getProcessorStats().get(serializedPipelineStat.getPipelineId());
            if (expectProcessors) {
                if (processorStat != null) {
                    Iterator<IngestStats.ProcessorStat> it = processorStat.iterator();
                    // intentionally enforcing identical ordering
                    for (IngestStats.ProcessorStat serializedProcessorStat : serializedProcessorStats) {
                        IngestStats.ProcessorStat ps = it.next();
                        assertEquals(ps.getName(), serializedProcessorStat.getName());
                        assertStats(ps.getStats(), serializedProcessorStat.getStats());
                    }
                    assertFalse(it.hasNext());
                }
            } else {
                // pre-6.5 streams did not serialize any processor stats
                assertNull(serializedProcessorStats);
            }
        }
    }

    private void assertStats(IngestStats.Stats fromObject, IngestStats.Stats fromStream) {
        assertEquals(fromObject.getIngestCount(), fromStream.getIngestCount());
        assertEquals(fromObject.getIngestFailedCount(), fromStream.getIngestFailedCount());
        assertEquals(fromObject.getIngestTimeInMillis(), fromStream.getIngestTimeInMillis());
        assertEquals(fromObject.getIngestCurrent(), fromStream.getIngestCurrent());
    }

    private IngestStats.Stats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
        return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null);
    }
}

@@ -21,12 +21,13 @@ package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.test.ESTestCase;

import java.time.Clock;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.LongSupplier;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.mockito.Mockito.mock;
@@ -62,7 +63,7 @@ public class PipelineProcessorTests extends ESTestCase {
        when(ingestService.getPipeline(pipelineId)).thenReturn(pipeline);
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);
        Map<String, Object> config = new HashMap<>();
        config.put("pipeline", pipelineId);
        config.put("name", pipelineId);
        factory.create(Collections.emptyMap(), null, config).execute(testIngestDocument);
        assertEquals(testIngestDocument, invoked.get());
    }
@@ -72,7 +73,7 @@ public class PipelineProcessorTests extends ESTestCase {
        IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);
        Map<String, Object> config = new HashMap<>();
        config.put("pipeline", "missingPipelineId");
        config.put("name", "missingPipelineId");
        IllegalStateException e = expectThrows(
            IllegalStateException.class,
            () -> factory.create(Collections.emptyMap(), null, config).execute(testIngestDocument)
@@ -88,21 +89,21 @@ public class PipelineProcessorTests extends ESTestCase {
        IngestService ingestService = mock(IngestService.class);
        IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
        Map<String, Object> outerConfig = new HashMap<>();
        outerConfig.put("pipeline", innerPipelineId);
        outerConfig.put("name", innerPipelineId);
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);
        Pipeline outer = new Pipeline(
            outerPipelineId, null, null,
            new CompoundProcessor(factory.create(Collections.emptyMap(), null, outerConfig))
        );
        Map<String, Object> innerConfig = new HashMap<>();
        innerConfig.put("pipeline", outerPipelineId);
        innerConfig.put("name", outerPipelineId);
        Pipeline inner = new Pipeline(
            innerPipelineId, null, null,
            new CompoundProcessor(factory.create(Collections.emptyMap(), null, innerConfig))
        );
        when(ingestService.getPipeline(outerPipelineId)).thenReturn(outer);
        when(ingestService.getPipeline(innerPipelineId)).thenReturn(inner);
        outerConfig.put("pipeline", innerPipelineId);
        outerConfig.put("name", innerPipelineId);
        ElasticsearchException e = expectThrows(
            ElasticsearchException.class,
            () -> factory.create(Collections.emptyMap(), null, outerConfig).execute(testIngestDocument)
@@ -117,7 +118,7 @@ public class PipelineProcessorTests extends ESTestCase {
        IngestService ingestService = mock(IngestService.class);
        IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
        Map<String, Object> outerConfig = new HashMap<>();
        outerConfig.put("pipeline", innerPipelineId);
        outerConfig.put("name", innerPipelineId);
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);
        Pipeline inner = new Pipeline(
            innerPipelineId, null, null, new CompoundProcessor()
@@ -136,22 +137,22 @@ public class PipelineProcessorTests extends ESTestCase {
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);

        Map<String, Object> pipeline1ProcessorConfig = new HashMap<>();
        pipeline1ProcessorConfig.put("pipeline", pipeline2Id);
        pipeline1ProcessorConfig.put("name", pipeline2Id);
        PipelineProcessor pipeline1Processor = factory.create(Collections.emptyMap(), null, pipeline1ProcessorConfig);

        Map<String, Object> pipeline2ProcessorConfig = new HashMap<>();
        pipeline2ProcessorConfig.put("pipeline", pipeline3Id);
        pipeline2ProcessorConfig.put("name", pipeline3Id);
        PipelineProcessor pipeline2Processor = factory.create(Collections.emptyMap(), null, pipeline2ProcessorConfig);

        Clock clock = mock(Clock.class);
        when(clock.millis()).thenReturn(0L).thenReturn(0L);
        LongSupplier relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L);
        Pipeline pipeline1 = new Pipeline(
            pipeline1Id, null, null, new CompoundProcessor(pipeline1Processor), clock
            pipeline1Id, null, null, new CompoundProcessor(pipeline1Processor), relativeTimeProvider
        );

        String key1 = randomAlphaOfLength(10);
        clock = mock(Clock.class);
        when(clock.millis()).thenReturn(0L).thenReturn(3L);
        relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L, TimeUnit.MILLISECONDS.toNanos(3));
        Pipeline pipeline2 = new Pipeline(
            pipeline2Id, null, null, new CompoundProcessor(true,
                Arrays.asList(
@@ -160,15 +161,15 @@ public class PipelineProcessorTests extends ESTestCase {
                    }),
                    pipeline2Processor),
                Collections.emptyList()),
            clock
            relativeTimeProvider
        );
        clock = mock(Clock.class);
        when(clock.millis()).thenReturn(0L).thenReturn(2L);
        relativeTimeProvider = mock(LongSupplier.class);
        when(relativeTimeProvider.getAsLong()).thenReturn(0L, TimeUnit.MILLISECONDS.toNanos(2));
        Pipeline pipeline3 = new Pipeline(
            pipeline3Id, null, null, new CompoundProcessor(
                new TestProcessor(ingestDocument -> {
                    throw new RuntimeException("error");
                })), clock
                })), relativeTimeProvider
        );
        when(ingestService.getPipeline(pipeline1Id)).thenReturn(pipeline1);
        when(ingestService.getPipeline(pipeline2Id)).thenReturn(pipeline2);

@@ -21,17 +21,22 @@ package org.elasticsearch.ingest;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ingest.SimulateProcessorResult;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.MockScriptEngine;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
import org.mockito.Mockito;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static org.elasticsearch.ingest.CompoundProcessor.ON_FAILURE_MESSAGE_FIELD;
import static org.elasticsearch.ingest.CompoundProcessor.ON_FAILURE_PROCESSOR_TAG_FIELD;
@@ -39,10 +44,11 @@ import static org.elasticsearch.ingest.CompoundProcessor.ON_FAILURE_PROCESSOR_TY
import static org.elasticsearch.ingest.TrackingResultProcessor.decorate;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

@@ -50,13 +56,11 @@ public class TrackingResultProcessorTests extends ESTestCase {

    private IngestDocument ingestDocument;
    private List<SimulateProcessorResult> resultList;
    private Set<PipelineProcessor> pipelinesSeen;

    @Before
    public void init() {
        ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>());
        resultList = new ArrayList<>();
        pipelinesSeen = Collections.newSetFromMap(new IdentityHashMap<>());
    }

    public void testActualProcessor() throws Exception {
@@ -78,7 +82,7 @@ public class TrackingResultProcessorTests extends ESTestCase {
        RuntimeException exception = new RuntimeException("processor failed");
        TestProcessor testProcessor = new TestProcessor(ingestDocument -> { throw exception; });
        CompoundProcessor actualProcessor = new CompoundProcessor(testProcessor);
        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList, pipelinesSeen);
        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);

        try {
            trackingProcessor.execute(ingestDocument);
@@ -104,7 +108,7 @@ public class TrackingResultProcessorTests extends ESTestCase {
            Arrays.asList(failProcessor, onFailureProcessor),
            Arrays.asList(onFailureProcessor, failProcessor))),
            Arrays.asList(onFailureProcessor));
        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList, pipelinesSeen);
        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);
        trackingProcessor.execute(ingestDocument);

        SimulateProcessorResult expectedFailResult = new SimulateProcessorResult(failProcessor.getTag(), ingestDocument);
@@ -142,7 +146,7 @@ public class TrackingResultProcessorTests extends ESTestCase {
        TestProcessor testProcessor = new TestProcessor(ingestDocument -> { throw exception; });
        CompoundProcessor actualProcessor = new CompoundProcessor(true, Collections.singletonList(testProcessor),
            Collections.emptyList());
        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList, pipelinesSeen);
        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);

        trackingProcessor.execute(ingestDocument);

@@ -154,11 +158,50 @@ public class TrackingResultProcessorTests extends ESTestCase {
        assertThat(resultList.get(0).getProcessorTag(), equalTo(expectedResult.getProcessorTag()));
    }

    public void testActualCompoundProcessorWithFalseConditional() throws Exception {
        String key1 = randomAlphaOfLength(10);
        String key2 = randomAlphaOfLength(10);
        String key3 = randomAlphaOfLength(10);

        String scriptName = "conditionalScript";
        ScriptService scriptService = new ScriptService(Settings.builder().build(), Collections.singletonMap(Script.DEFAULT_SCRIPT_LANG,
            new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Collections.singletonMap(scriptName, ctx -> false), Collections.emptyMap())),
            new HashMap<>(ScriptModule.CORE_CONTEXTS)
        );

        CompoundProcessor compoundProcessor = new CompoundProcessor(
            new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key1, randomInt()); }),
            new ConditionalProcessor(
                randomAlphaOfLength(10),
                new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()), scriptService,
                new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key2, randomInt()); })),
            new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key3, randomInt()); }));

        CompoundProcessor trackingProcessor = decorate(compoundProcessor, resultList);
        trackingProcessor.execute(ingestDocument);
        SimulateProcessorResult expectedResult = new SimulateProcessorResult(compoundProcessor.getTag(), ingestDocument);

        // the step for key2 never executes because the conditional is false, so it is not part of the result set
        assertThat(resultList.size(), equalTo(2));

        assertTrue(resultList.get(0).getIngestDocument().hasField(key1));
        assertFalse(resultList.get(0).getIngestDocument().hasField(key2));
        assertFalse(resultList.get(0).getIngestDocument().hasField(key3));

        assertTrue(resultList.get(1).getIngestDocument().hasField(key1));
        assertFalse(resultList.get(1).getIngestDocument().hasField(key2));
        assertTrue(resultList.get(1).getIngestDocument().hasField(key3));

        assertThat(resultList.get(1).getIngestDocument(), equalTo(expectedResult.getIngestDocument()));
        assertThat(resultList.get(1).getFailure(), nullValue());
        assertThat(resultList.get(1).getProcessorTag(), nullValue());
    }

    public void testActualPipelineProcessor() throws Exception {
        String pipelineId = "pipeline1";
        IngestService ingestService = mock(IngestService.class);
        Map<String, Object> pipelineConfig = new HashMap<>();
        pipelineConfig.put("pipeline", pipelineId);
        pipelineConfig.put("name", pipelineId);
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);

        String key1 = randomAlphaOfLength(10);
@@ -176,13 +219,13 @@ public class TrackingResultProcessorTests extends ESTestCase {
        PipelineProcessor pipelineProcessor = factory.create(Collections.emptyMap(), null, pipelineConfig);
        CompoundProcessor actualProcessor = new CompoundProcessor(pipelineProcessor);

        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList, pipelinesSeen);
        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);

        trackingProcessor.execute(ingestDocument);

        SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument);

        verify(ingestService).getPipeline(pipelineId);
        verify(ingestService, Mockito.atLeast(1)).getPipeline(pipelineId);
        assertThat(resultList.size(), equalTo(3));

        assertTrue(resultList.get(0).getIngestDocument().hasField(key1));
@@ -198,13 +241,149 @@ public class TrackingResultProcessorTests extends ESTestCase {
        assertThat(resultList.get(2).getProcessorTag(), nullValue());
    }

    public void testActualPipelineProcessorWithTrueConditional() throws Exception {
        String pipelineId1 = "pipeline1";
        String pipelineId2 = "pipeline2";
        IngestService ingestService = mock(IngestService.class);
        Map<String, Object> pipelineConfig0 = new HashMap<>();
        pipelineConfig0.put("name", pipelineId1);
        Map<String, Object> pipelineConfig1 = new HashMap<>();
        pipelineConfig1.put("name", pipelineId1);
        Map<String, Object> pipelineConfig2 = new HashMap<>();
        pipelineConfig2.put("name", pipelineId2);
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);

        String key1 = randomAlphaOfLength(10);
        String key2 = randomAlphaOfLength(10);
        String key3 = randomAlphaOfLength(10);

        String scriptName = "conditionalScript";

        ScriptService scriptService = new ScriptService(Settings.builder().build(), Collections.singletonMap(Script.DEFAULT_SCRIPT_LANG,
            new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Collections.singletonMap(scriptName, ctx -> true), Collections.emptyMap())),
            new HashMap<>(ScriptModule.CORE_CONTEXTS)
        );

        Pipeline pipeline1 = new Pipeline(
            pipelineId1, null, null, new CompoundProcessor(
                new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key1, randomInt()); }),
                new ConditionalProcessor(
                    randomAlphaOfLength(10),
                    new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()), scriptService,
                    factory.create(Collections.emptyMap(), null, pipelineConfig2)),
                new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key3, randomInt()); })
            )
        );

        Pipeline pipeline2 = new Pipeline(
            pipelineId2, null, null, new CompoundProcessor(
                new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key2, randomInt()); })));

        when(ingestService.getPipeline(pipelineId1)).thenReturn(pipeline1);
        when(ingestService.getPipeline(pipelineId2)).thenReturn(pipeline2);

        PipelineProcessor pipelineProcessor = factory.create(Collections.emptyMap(), null, pipelineConfig0);
        CompoundProcessor actualProcessor = new CompoundProcessor(pipelineProcessor);

        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);

        trackingProcessor.execute(ingestDocument);

        SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument);

        verify(ingestService, Mockito.atLeast(1)).getPipeline(pipelineId1);
        verify(ingestService, Mockito.atLeast(1)).getPipeline(pipelineId2);
        assertThat(resultList.size(), equalTo(3));

        assertTrue(resultList.get(0).getIngestDocument().hasField(key1));
        assertFalse(resultList.get(0).getIngestDocument().hasField(key2));
        assertFalse(resultList.get(0).getIngestDocument().hasField(key3));

        assertTrue(resultList.get(1).getIngestDocument().hasField(key1));
        assertTrue(resultList.get(1).getIngestDocument().hasField(key2));
        assertFalse(resultList.get(1).getIngestDocument().hasField(key3));

        assertThat(resultList.get(2).getIngestDocument(), equalTo(expectedResult.getIngestDocument()));
        assertThat(resultList.get(2).getFailure(), nullValue());
        assertThat(resultList.get(2).getProcessorTag(), nullValue());
    }

    public void testActualPipelineProcessorWithFalseConditional() throws Exception {
        String pipelineId1 = "pipeline1";
        String pipelineId2 = "pipeline2";
        IngestService ingestService = mock(IngestService.class);
        Map<String, Object> pipelineConfig0 = new HashMap<>();
        pipelineConfig0.put("name", pipelineId1);
        Map<String, Object> pipelineConfig1 = new HashMap<>();
        pipelineConfig1.put("name", pipelineId1);
        Map<String, Object> pipelineConfig2 = new HashMap<>();
        pipelineConfig2.put("name", pipelineId2);
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);

        String key1 = randomAlphaOfLength(10);
        String key2 = randomAlphaOfLength(10);
        String key3 = randomAlphaOfLength(10);

        String scriptName = "conditionalScript";

        ScriptService scriptService = new ScriptService(Settings.builder().build(), Collections.singletonMap(Script.DEFAULT_SCRIPT_LANG,
            new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Collections.singletonMap(scriptName, ctx -> false), Collections.emptyMap())),
            new HashMap<>(ScriptModule.CORE_CONTEXTS)
        );

        Pipeline pipeline1 = new Pipeline(
            pipelineId1, null, null, new CompoundProcessor(
                new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key1, randomInt()); }),
                new ConditionalProcessor(
                    randomAlphaOfLength(10),
                    new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()), scriptService,
                    factory.create(Collections.emptyMap(), null, pipelineConfig2)),
                new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key3, randomInt()); })
            )
        );

        Pipeline pipeline2 = new Pipeline(
            pipelineId2, null, null, new CompoundProcessor(
                new TestProcessor(ingestDocument -> { ingestDocument.setFieldValue(key2, randomInt()); })));

        when(ingestService.getPipeline(pipelineId1)).thenReturn(pipeline1);
        when(ingestService.getPipeline(pipelineId2)).thenReturn(pipeline2);

        PipelineProcessor pipelineProcessor = factory.create(Collections.emptyMap(), null, pipelineConfig0);
        CompoundProcessor actualProcessor = new CompoundProcessor(pipelineProcessor);

        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);

        trackingProcessor.execute(ingestDocument);

        SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument);

        verify(ingestService, Mockito.atLeast(1)).getPipeline(pipelineId1);
        verify(ingestService, Mockito.never()).getPipeline(pipelineId2);
        assertThat(resultList.size(), equalTo(2));

        assertTrue(resultList.get(0).getIngestDocument().hasField(key1));
        assertFalse(resultList.get(0).getIngestDocument().hasField(key2));
        assertFalse(resultList.get(0).getIngestDocument().hasField(key3));

        assertTrue(resultList.get(1).getIngestDocument().hasField(key1));
        assertFalse(resultList.get(1).getIngestDocument().hasField(key2));
        assertTrue(resultList.get(1).getIngestDocument().hasField(key3));

        assertThat(resultList.get(1).getIngestDocument(), equalTo(expectedResult.getIngestDocument()));
        assertThat(resultList.get(1).getFailure(), nullValue());
        assertThat(resultList.get(1).getProcessorTag(), nullValue());
    }

    public void testActualPipelineProcessorWithHandledFailure() throws Exception {
        RuntimeException exception = new RuntimeException("processor failed");

        String pipelineId = "pipeline1";
        IngestService ingestService = mock(IngestService.class);
        Map<String, Object> pipelineConfig = new HashMap<>();
        pipelineConfig.put("pipeline", pipelineId);
        pipelineConfig.put("name", pipelineId);
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);

        String key1 = randomAlphaOfLength(10);
@@ -226,13 +405,13 @@ public class TrackingResultProcessorTests extends ESTestCase {
        PipelineProcessor pipelineProcessor = factory.create(Collections.emptyMap(), null, pipelineConfig);
        CompoundProcessor actualProcessor = new CompoundProcessor(pipelineProcessor);

        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList, pipelinesSeen);
        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);

        trackingProcessor.execute(ingestDocument);

        SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument);

        verify(ingestService).getPipeline(pipelineId);
        verify(ingestService, Mockito.atLeast(2)).getPipeline(pipelineId);
        assertThat(resultList.size(), equalTo(4));

        assertTrue(resultList.get(0).getIngestDocument().hasField(key1));
@@ -253,31 +432,42 @@ public class TrackingResultProcessorTests extends ESTestCase {
    }

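    // A cycle between pipeline processors is now detected while executing the decorated processor (rather than
    // while decorating it) and surfaces as an ElasticsearchException whose cause chain ends in the IllegalStateException.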
    public void testActualPipelineProcessorWithCycle() throws Exception {
        String pipelineId = "pipeline1";
        String pipelineId1 = "pipeline1";
        String pipelineId2 = "pipeline2";
        IngestService ingestService = mock(IngestService.class);
        Map<String, Object> pipelineConfig = new HashMap<>();
        pipelineConfig.put("pipeline", pipelineId);
        Map<String, Object> pipelineConfig0 = new HashMap<>();
        pipelineConfig0.put("name", pipelineId1);
        Map<String, Object> pipelineConfig1 = new HashMap<>();
        pipelineConfig1.put("name", pipelineId1);
        Map<String, Object> pipelineConfig2 = new HashMap<>();
        pipelineConfig2.put("name", pipelineId2);
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);

        PipelineProcessor pipelineProcessor = factory.create(Collections.emptyMap(), null, pipelineConfig);
        Pipeline pipeline = new Pipeline(
            pipelineId, null, null, new CompoundProcessor(pipelineProcessor)
        );
        when(ingestService.getPipeline(pipelineId)).thenReturn(pipeline);
        Pipeline pipeline1 = new Pipeline(
            pipelineId1, null, null, new CompoundProcessor(factory.create(Collections.emptyMap(), null, pipelineConfig2)));

        Pipeline pipeline2 = new Pipeline(
            pipelineId2, null, null, new CompoundProcessor(factory.create(Collections.emptyMap(), null, pipelineConfig1)));

        when(ingestService.getPipeline(pipelineId1)).thenReturn(pipeline1);
        when(ingestService.getPipeline(pipelineId2)).thenReturn(pipeline2);

        PipelineProcessor pipelineProcessor = factory.create(Collections.emptyMap(), null, pipelineConfig0);
        CompoundProcessor actualProcessor = new CompoundProcessor(pipelineProcessor);

        IllegalStateException exception = expectThrows(IllegalStateException.class,
            () -> decorate(actualProcessor, resultList, pipelinesSeen));
        assertThat(exception.getMessage(), equalTo("Cycle detected for pipeline: pipeline1"));
    }
        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);

        ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> trackingProcessor.execute(ingestDocument));
        assertThat(exception.getCause(), instanceOf(IllegalArgumentException.class));
        assertThat(exception.getCause().getCause(), instanceOf(IllegalStateException.class));
        assertThat(exception.getMessage(), containsString("Cycle detected for pipeline: pipeline1"));
    }

    public void testActualPipelineProcessorRepeatedInvocation() throws Exception {
        String pipelineId = "pipeline1";
        IngestService ingestService = mock(IngestService.class);
        Map<String, Object> pipelineConfig = new HashMap<>();
        pipelineConfig.put("pipeline", pipelineId);
        pipelineConfig.put("name", pipelineId);
        PipelineProcessor.Factory factory = new PipelineProcessor.Factory(ingestService);

        String key1 = randomAlphaOfLength(10);
@@ -290,13 +480,13 @@ public class TrackingResultProcessorTests extends ESTestCase {

        CompoundProcessor actualProcessor = new CompoundProcessor(pipelineProcessor, pipelineProcessor);

        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList, pipelinesSeen);
        CompoundProcessor trackingProcessor = decorate(actualProcessor, resultList);

        trackingProcessor.execute(ingestDocument);

        SimulateProcessorResult expectedResult = new SimulateProcessorResult(actualProcessor.getTag(), ingestDocument);

        verify(ingestService, times(2)).getPipeline(pipelineId);
        verify(ingestService, Mockito.atLeast(2)).getPipeline(pipelineId);
        assertThat(resultList.size(), equalTo(2));

        assertThat(resultList.get(0).getIngestDocument(), not(equalTo(expectedResult.getIngestDocument())));

@@ -68,6 +68,7 @@ import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.cert.CertificateException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -449,7 +450,7 @@ public abstract class ESRestTestCase extends ESTestCase {
        }
    }

    private void wipeRollupJobs() throws IOException {
    private void wipeRollupJobs() throws IOException, InterruptedException {
        Response response = adminClient().performRequest(new Request("GET", "/_xpack/rollup/job/_all"));
        Map<String, Object> jobs = entityAsMap(response);
        @SuppressWarnings("unchecked")
@@ -460,6 +461,29 @@ public abstract class ESRestTestCase extends ESTestCase {
            return;
        }

        for (Map<String, Object> jobConfig : jobConfigs) {
            @SuppressWarnings("unchecked")
            String jobId = (String) ((Map<String, Object>) jobConfig.get("config")).get("id");
            Request request = new Request("POST", "/_xpack/rollup/job/" + jobId + "/_stop");
            request.addParameter("ignore", "404");
            logger.debug("stopping rollup job [{}]", jobId);
            adminClient().performRequest(request);
        }

        // TODO this is temporary until the StopJob API gains the ability to block until stopped
        awaitBusy(() -> {
            Request request = new Request("GET", "/_xpack/rollup/job/_all");
            try {
                Response jobsResponse = adminClient().performRequest(request);
                String body = EntityUtils.toString(jobsResponse.getEntity());
                logger.error(body);
                // If the body contains any of the non-stopped states, at least one job is not finished yet
                return Arrays.stream(new String[]{"started", "aborting", "stopping", "indexing"}).noneMatch(body::contains);
            } catch (IOException e) {
                return false;
            }
        }, 10, TimeUnit.SECONDS);

        for (Map<String, Object> jobConfig : jobConfigs) {
            @SuppressWarnings("unchecked")
            String jobId = (String) ((Map<String, Object>) jobConfig.get("config")).get("id");

@@ -50,8 +50,8 @@ public class CcrMultiClusterLicenseIT extends ESRestTestCase {
    public void testAutoFollow() throws Exception {
        assumeFalse("windows is the worst", Constants.WINDOWS);
        if (runningAgainstLeaderCluster == false) {
            final Request request = new Request("PUT", "/_ccr/auto_follow/leader_cluster");
            request.setJsonEntity("{\"leader_index_patterns\":[\"*\"]}");
            final Request request = new Request("PUT", "/_ccr/auto_follow/test_pattern");
            request.setJsonEntity("{\"leader_index_patterns\":[\"*\"], \"leader_cluster\": \"leader_cluster\"}");
            client().performRequest(request);

            // parse the logs and ensure that the auto-coordinator skipped coordination on the leader cluster
@@ -64,7 +64,7 @@ public class CcrMultiClusterLicenseIT extends ESRestTestCase {
            while (it.hasNext()) {
                final String line = it.next();
                if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " +
                    "failure occurred while fetching cluster state in leader cluster \\[leader_cluster\\]")) {
                    "failure occurred while fetching cluster state for auto follow pattern \\[test_pattern\\]")) {
                    warn = true;
                    break;
                }

@@ -146,14 +146,14 @@ public class FollowIndexSecurityIT extends ESRestTestCase {
        String disallowedIndex = "logs-us-20190101";

        {
            Request request = new Request("PUT", "/_ccr/auto_follow/leader_cluster");
            request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"]}");
            Request request = new Request("PUT", "/_ccr/auto_follow/test_pattern");
            request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"], \"leader_cluster\": \"leader_cluster\"}");
            Exception e = expectThrows(ResponseException.class, () -> assertOK(client().performRequest(request)));
            assertThat(e.getMessage(), containsString("insufficient privileges to follow index [logs-*]"));
        }

        Request request = new Request("PUT", "/_ccr/auto_follow/leader_cluster");
        request.setJsonEntity("{\"leader_index_patterns\": [\"logs-eu-*\"]}");
        Request request = new Request("PUT", "/_ccr/auto_follow/test_pattern");
        request.setJsonEntity("{\"leader_index_patterns\": [\"logs-eu-*\"], \"leader_cluster\": \"leader_cluster\"}");
        assertOK(client().performRequest(request));

        try (RestClient leaderClient = buildLeaderClient()) {
@@ -185,7 +185,7 @@ public class FollowIndexSecurityIT extends ESRestTestCase {
        });

        // clean up by deleting the auto-follow pattern and pausing following:
        request = new Request("DELETE", "/_ccr/auto_follow/leader_cluster");
        request = new Request("DELETE", "/_ccr/auto_follow/test_pattern");
        assertOK(client().performRequest(request));
        pauseFollow(allowedIndex);
    }

@@ -103,8 +103,8 @@ public class FollowIndexIT extends ESRestTestCase {
    public void testAutoFollowPatterns() throws Exception {
        assumeFalse("Test should only run when both clusters are running", runningAgainstLeaderCluster);

        Request request = new Request("PUT", "/_ccr/auto_follow/leader_cluster");
        request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"]}");
        Request request = new Request("PUT", "/_ccr/auto_follow/test_pattern");
        request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"], \"leader_cluster\": \"leader_cluster\"}");
        assertOK(client().performRequest(request));

        try (RestClient leaderClient = buildLeaderClient()) {

@@ -21,29 +21,32 @@

   - do:
       ccr.put_auto_follow_pattern:
-        leader_cluster: local
+        name: my_pattern
         body:
+          leader_cluster: local
           leader_index_patterns: ['logs-*']
           max_concurrent_read_batches: 2
   - is_true: acknowledged

   - do:
       ccr.get_auto_follow_pattern:
-        leader_cluster: local
-  - match: { local.leader_index_patterns: ['logs-*'] }
-  - match: { local.max_concurrent_read_batches: 2 }
+        name: my_pattern
+  - match: { my_pattern.leader_cluster: 'local' }
+  - match: { my_pattern.leader_index_patterns: ['logs-*'] }
+  - match: { my_pattern.max_concurrent_read_batches: 2 }

   - do:
       ccr.get_auto_follow_pattern: {}
-  - match: { local.leader_index_patterns: ['logs-*'] }
-  - match: { local.max_concurrent_read_batches: 2 }
+  - match: { my_pattern.leader_cluster: 'local' }
+  - match: { my_pattern.leader_index_patterns: ['logs-*'] }
+  - match: { my_pattern.max_concurrent_read_batches: 2 }

   - do:
       ccr.delete_auto_follow_pattern:
-        leader_cluster: local
+        name: my_pattern
   - is_true: acknowledged

   - do:
       catch: missing
       ccr.get_auto_follow_pattern:
-        leader_cluster: local
+        name: my_pattern

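For orientation, the renamed endpoints exercised by this YAML test can also be driven from the low-level Java REST client. A minimal sketch, assuming a cluster reachable on localhost:9200 and an illustrative pattern name; this usage example is not part of the change itself.

--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class AutoFollowApiSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            // PUT now takes the pattern name in the URL and the leader cluster in the body.
            Request put = new Request("PUT", "/_ccr/auto_follow/my_pattern");
            put.setJsonEntity("{\"leader_cluster\": \"local\", \"leader_index_patterns\": [\"logs-*\"]}");
            client.performRequest(put);

            // GET and DELETE address the pattern by name as well.
            client.performRequest(new Request("GET", "/_ccr/auto_follow/my_pattern"));
            client.performRequest(new Request("DELETE", "/_ccr/auto_follow/my_pattern"));
        }
    }
}
--------------------------------------------------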
@@ -47,6 +47,7 @@ import java.util.TreeMap;
 import java.util.function.BiConsumer;
+import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.stream.Collectors;

 /**
  * A component that runs only on the elected master node and follows leader indices automatically

@@ -105,19 +106,19 @@ public class AutoFollowCoordinator implements ClusterStateApplier {
     synchronized void updateStats(List<AutoFollowResult> results) {
         for (AutoFollowResult result : results) {
             if (result.clusterStateFetchException != null) {
-                recentAutoFollowErrors.put(result.clusterAlias,
+                recentAutoFollowErrors.put(result.autoFollowPatternName,
                     new ElasticsearchException(result.clusterStateFetchException));
                 numberOfFailedRemoteClusterStateRequests++;
-                LOGGER.warn(new ParameterizedMessage("failure occurred while fetching cluster state in leader cluster [{}]",
-                    result.clusterAlias), result.clusterStateFetchException);
+                LOGGER.warn(new ParameterizedMessage("failure occurred while fetching cluster state for auto follow pattern [{}]",
+                    result.autoFollowPatternName), result.clusterStateFetchException);
             } else {
                 for (Map.Entry<Index, Exception> entry : result.autoFollowExecutionResults.entrySet()) {
                     if (entry.getValue() != null) {
                         numberOfFailedIndicesAutoFollowed++;
-                        recentAutoFollowErrors.put(result.clusterAlias + ":" + entry.getKey().getName(),
+                        recentAutoFollowErrors.put(result.autoFollowPatternName + ":" + entry.getKey().getName(),
                             ExceptionsHelper.convertToElastic(entry.getValue()));
-                        LOGGER.warn(new ParameterizedMessage("failure occurred while auto following index [{}] in leader cluster [{}]",
-                            entry.getKey(), result.clusterAlias), entry.getValue());
+                        LOGGER.warn(new ParameterizedMessage("failure occurred while auto following index [{}] for auto follow " +
+                            "pattern [{}]", entry.getKey(), result.autoFollowPatternName), entry.getValue());
                     } else {
                         numberOfSuccessfulIndicesAutoFollowed++;
                     }

@@ -243,34 +244,45 @@ public class AutoFollowCoordinator implements ClusterStateApplier {
         int i = 0;
         for (Map.Entry<String, AutoFollowPattern> entry : autoFollowMetadata.getPatterns().entrySet()) {
             final int slot = i;
-            final String clusterAlias = entry.getKey();
+            final String autoFollowPattenName = entry.getKey();
             final AutoFollowPattern autoFollowPattern = entry.getValue();
+            final String leaderCluster = autoFollowPattern.getLeaderCluster();

-            Map<String, String> headers = autoFollowMetadata.getHeaders().get(clusterAlias);
-            getLeaderClusterState(headers, clusterAlias, (leaderClusterState, e) -> {
+            Map<String, String> headers = autoFollowMetadata.getHeaders().get(autoFollowPattenName);
+            getLeaderClusterState(headers, leaderCluster, (leaderClusterState, e) -> {
                 if (leaderClusterState != null) {
                     assert e == null;
-                    final List<String> followedIndices = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get(clusterAlias);
-                    final List<Index> leaderIndicesToFollow = getLeaderIndicesToFollow(clusterAlias, autoFollowPattern,
+                    final List<String> followedIndices = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get(autoFollowPattenName);
+                    final List<Index> leaderIndicesToFollow = getLeaderIndicesToFollow(leaderCluster, autoFollowPattern,
                         leaderClusterState, followerClusterState, followedIndices);
                     if (leaderIndicesToFollow.isEmpty()) {
-                        finalise(slot, new AutoFollowResult(clusterAlias));
+                        finalise(slot, new AutoFollowResult(autoFollowPattenName));
                     } else {
+                        List<Tuple<String, AutoFollowPattern>> patternsForTheSameLeaderCluster = autoFollowMetadata.getPatterns()
+                            .entrySet().stream()
+                            .filter(item -> autoFollowPattenName.equals(item.getKey()) == false)
+                            .filter(item -> leaderCluster.equals(item.getValue().getLeaderCluster()))
+                            .map(item -> new Tuple<>(item.getKey(), item.getValue()))
+                            .collect(Collectors.toList());
+
                         Consumer<AutoFollowResult> resultHandler = result -> finalise(slot, result);
-                        checkAutoFollowPattern(clusterAlias, autoFollowPattern, leaderIndicesToFollow, headers, resultHandler);
+                        checkAutoFollowPattern(autoFollowPattenName, leaderCluster, autoFollowPattern, leaderIndicesToFollow, headers,
+                            patternsForTheSameLeaderCluster, resultHandler);
                     }
                 } else {
-                    finalise(slot, new AutoFollowResult(clusterAlias, e));
+                    finalise(slot, new AutoFollowResult(autoFollowPattenName, e));
                 }
             });
             i++;
         }
     }

-    private void checkAutoFollowPattern(String clusterAlias,
+    private void checkAutoFollowPattern(String autoFollowPattenName,
+                                        String clusterAlias,
                                         AutoFollowPattern autoFollowPattern,
                                         List<Index> leaderIndicesToFollow,
                                         Map<String, String> headers,
+                                        List<Tuple<String, AutoFollowPattern>> patternsForTheSameLeaderCluster,
                                         Consumer<AutoFollowResult> resultHandler) {

         final CountDown leaderIndicesCountDown = new CountDown(leaderIndicesToFollow.size());

@@ -278,16 +290,31 @@ public class AutoFollowCoordinator implements ClusterStateApplier {
         for (int i = 0; i < leaderIndicesToFollow.size(); i++) {
             final Index indexToFollow = leaderIndicesToFollow.get(i);
             final int slot = i;
-            followLeaderIndex(clusterAlias, indexToFollow, autoFollowPattern, headers, error -> {
+
+            List<String> otherMatchingPatterns = patternsForTheSameLeaderCluster.stream()
+                .filter(otherPattern -> otherPattern.v2().match(indexToFollow.getName()))
+                .map(Tuple::v1)
+                .collect(Collectors.toList());
+            if (otherMatchingPatterns.size() != 0) {
+                results.set(slot, new Tuple<>(indexToFollow, new ElasticsearchException("index to follow [" + indexToFollow.getName() +
+                    "] for pattern [" + autoFollowPattenName + "] matches with other patterns " + otherMatchingPatterns + "")));
+                if (leaderIndicesCountDown.countDown()) {
+                    resultHandler.accept(new AutoFollowResult(autoFollowPattenName, results.asList()));
+                }
+            } else {
+                followLeaderIndex(autoFollowPattenName, clusterAlias, indexToFollow, autoFollowPattern, headers, error -> {
                     results.set(slot, new Tuple<>(indexToFollow, error));
                     if (leaderIndicesCountDown.countDown()) {
-                        resultHandler.accept(new AutoFollowResult(clusterAlias, results.asList()));
+                        resultHandler.accept(new AutoFollowResult(autoFollowPattenName, results.asList()));
                     }
                 });
+            }

         }
     }

-    private void followLeaderIndex(String clusterAlias,
+    private void followLeaderIndex(String autoFollowPattenName,
+                                   String clusterAlias,
                                    Index indexToFollow,
                                    AutoFollowPattern pattern,
                                    Map<String,String> headers,

@@ -313,7 +340,7 @@ public class AutoFollowCoordinator implements ClusterStateApplier {

             // This function updates the auto follow metadata in the cluster to record that the leader index has been followed:
             // (so that we do not try to follow it in subsequent auto follow runs)
-            Function<ClusterState, ClusterState> function = recordLeaderIndexAsFollowFunction(clusterAlias, indexToFollow);
+            Function<ClusterState, ClusterState> function = recordLeaderIndexAsFollowFunction(autoFollowPattenName, indexToFollow);
             // The coordinator always runs on the elected master node, so we can update cluster state here:
             updateAutoFollowMetadata(function, onResult);
         };

@@ -356,12 +383,12 @@ public class AutoFollowCoordinator implements ClusterStateApplier {
         }
     }

-    static Function<ClusterState, ClusterState> recordLeaderIndexAsFollowFunction(String clusterAlias,
+    static Function<ClusterState, ClusterState> recordLeaderIndexAsFollowFunction(String name,
                                                                                   Index indexToFollow) {
         return currentState -> {
             AutoFollowMetadata currentAutoFollowMetadata = currentState.metaData().custom(AutoFollowMetadata.TYPE);
             Map<String, List<String>> newFollowedIndexUUIDS = new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs());
-            newFollowedIndexUUIDS.compute(clusterAlias, (key, existingUUIDs) -> {
+            newFollowedIndexUUIDS.compute(name, (key, existingUUIDs) -> {
                 assert existingUUIDs != null;
                 List<String> newUUIDs = new ArrayList<>(existingUUIDs);
                 newUUIDs.add(indexToFollow.getUUID());

@@ -405,12 +432,12 @@ public class AutoFollowCoordinator implements ClusterStateApplier {

     static class AutoFollowResult {

-        final String clusterAlias;
+        final String autoFollowPatternName;
         final Exception clusterStateFetchException;
         final Map<Index, Exception> autoFollowExecutionResults;

-        AutoFollowResult(String clusterAlias, List<Tuple<Index, Exception>> results) {
-            this.clusterAlias = clusterAlias;
+        AutoFollowResult(String autoFollowPatternName, List<Tuple<Index, Exception>> results) {
+            this.autoFollowPatternName = autoFollowPatternName;

             Map<Index, Exception> autoFollowExecutionResults = new HashMap<>();
             for (Tuple<Index, Exception> result : results) {

@@ -421,14 +448,14 @@ public class AutoFollowCoordinator implements ClusterStateApplier {
             this.autoFollowExecutionResults = Collections.unmodifiableMap(autoFollowExecutionResults);
         }

-        AutoFollowResult(String clusterAlias, Exception e) {
-            this.clusterAlias = clusterAlias;
+        AutoFollowResult(String autoFollowPatternName, Exception e) {
+            this.autoFollowPatternName = autoFollowPatternName;
             this.clusterStateFetchException = e;
             this.autoFollowExecutionResults = Collections.emptyMap();
         }

-        AutoFollowResult(String clusterAlias) {
-            this(clusterAlias, (Exception) null);
+        AutoFollowResult(String autoFollowPatternName) {
+            this(autoFollowPatternName, (Exception) null);
         }
     }
 }

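The conflict handling added to AutoFollowCoordinator above boils down to: for each index a pattern wants to follow, collect every other pattern on the same leader cluster that also matches, and record an error per pattern instead of following when there is any. A standalone sketch of that decision follows; the simpleMatch helper is a deliberately simplified stand-in for org.elasticsearch.common.regex.Regex.simpleMatch, and the pattern data is made up.

--------------------------------------------------
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PatternConflictSketch {
    // Stand-in matcher: supports only a single trailing '*' wildcard.
    static boolean simpleMatch(String pattern, String value) {
        return pattern.endsWith("*")
            ? value.startsWith(pattern.substring(0, pattern.length() - 1))
            : pattern.equals(value);
    }

    public static void main(String[] args) {
        // Pattern name -> leader index pattern, all on the same leader cluster.
        Map<String, String> patterns = new LinkedHashMap<>();
        patterns.put("my-pattern1", "logs-*");
        patterns.put("my-pattern2", "logs-2018*");

        String indexToFollow = "logs-201801";
        List<String> matching = new ArrayList<>();
        for (Map.Entry<String, String> e : patterns.entrySet()) {
            if (simpleMatch(e.getValue(), indexToFollow)) {
                matching.add(e.getKey());
            }
        }
        // With more than one match the coordinator records an error instead of following:
        System.out.println(matching.size() > 1
            ? "conflict, skipping: " + matching
            : "followed by: " + matching);
    }
}
--------------------------------------------------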
@@ -39,6 +39,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Objects;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;

 import static org.elasticsearch.action.ValidateActions.addValidationError;

@@ -67,6 +68,8 @@ public class ShardChangesAction extends Action<ShardChangesAction.Response> {
         private TimeValue pollTimeout = TransportResumeFollowAction.DEFAULT_POLL_TIMEOUT;
         private ByteSizeValue maxBatchSize = TransportResumeFollowAction.DEFAULT_MAX_BATCH_SIZE;

+        private long relativeStartNanos;
+
         public Request(ShardId shardId, String expectedHistoryUUID) {
             super(shardId.getIndexName());
             this.shardId = shardId;

@@ -142,6 +145,9 @@ public class ShardChangesAction extends Action<ShardChangesAction.Response> {
             expectedHistoryUUID = in.readString();
             pollTimeout = in.readTimeValue();
             maxBatchSize = new ByteSizeValue(in);
+
+            // Starting the clock in order to know how much time is spent on fetching operations:
+            relativeStartNanos = System.nanoTime();
         }

         @Override

@@ -220,6 +226,12 @@ public class ShardChangesAction extends Action<ShardChangesAction.Response> {
             return operations;
         }

+        private long tookInMillis;
+
+        public long getTookInMillis() {
+            return tookInMillis;
+        }
+
         Response() {
         }

@@ -228,13 +240,15 @@ public class ShardChangesAction extends Action<ShardChangesAction.Response> {
                  final long globalCheckpoint,
                  final long maxSeqNo,
                  final long maxSeqNoOfUpdatesOrDeletes,
-                 final Translog.Operation[] operations) {
+                 final Translog.Operation[] operations,
+                 final long tookInMillis) {

            this.mappingVersion = mappingVersion;
            this.globalCheckpoint = globalCheckpoint;
            this.maxSeqNo = maxSeqNo;
            this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes;
            this.operations = operations;
+           this.tookInMillis = tookInMillis;
        }

        @Override

@@ -245,6 +259,7 @@ public class ShardChangesAction extends Action<ShardChangesAction.Response> {
             maxSeqNo = in.readZLong();
             maxSeqNoOfUpdatesOrDeletes = in.readZLong();
             operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new);
+            tookInMillis = in.readVLong();
         }

         @Override

@@ -255,6 +270,7 @@ public class ShardChangesAction extends Action<ShardChangesAction.Response> {
             out.writeZLong(maxSeqNo);
             out.writeZLong(maxSeqNoOfUpdatesOrDeletes);
             out.writeArray(Translog.Operation::writeOperation, operations);
+            out.writeVLong(tookInMillis);
         }

         @Override

@@ -266,12 +282,14 @@ public class ShardChangesAction extends Action<ShardChangesAction.Response> {
                 globalCheckpoint == that.globalCheckpoint &&
                 maxSeqNo == that.maxSeqNo &&
                 maxSeqNoOfUpdatesOrDeletes == that.maxSeqNoOfUpdatesOrDeletes &&
-                Arrays.equals(operations, that.operations);
+                Arrays.equals(operations, that.operations) &&
+                tookInMillis == that.tookInMillis;
         }

         @Override
         public int hashCode() {
-            return Objects.hash(mappingVersion, globalCheckpoint, maxSeqNo, maxSeqNoOfUpdatesOrDeletes, Arrays.hashCode(operations));
+            return Objects.hash(mappingVersion, globalCheckpoint, maxSeqNo, maxSeqNoOfUpdatesOrDeletes,
+                Arrays.hashCode(operations), tookInMillis);
         }
     }

@@ -308,7 +326,7 @@ public class ShardChangesAction extends Action<ShardChangesAction.Response> {
                 request.getMaxBatchSize());
             // must capture after snapshotting operations to ensure this MUS is at least the highest MUS of any of these operations.
             final long maxSeqNoOfUpdatesOrDeletes = indexShard.getMaxSeqNoOfUpdatesOrDeletes();
-            return getResponse(mappingVersion, seqNoStats, maxSeqNoOfUpdatesOrDeletes, operations);
+            return getResponse(mappingVersion, seqNoStats, maxSeqNoOfUpdatesOrDeletes, operations, request.relativeStartNanos);
         }

         @Override

@@ -373,7 +391,8 @@ public class ShardChangesAction extends Action<ShardChangesAction.Response> {
                     clusterService.state().metaData().index(shardId.getIndex()).getMappingVersion();
                 final SeqNoStats latestSeqNoStats = indexShard.seqNoStats();
                 final long maxSeqNoOfUpdatesOrDeletes = indexShard.getMaxSeqNoOfUpdatesOrDeletes();
-                listener.onResponse(getResponse(mappingVersion, latestSeqNoStats, maxSeqNoOfUpdatesOrDeletes, EMPTY_OPERATIONS_ARRAY));
+                listener.onResponse(getResponse(mappingVersion, latestSeqNoStats, maxSeqNoOfUpdatesOrDeletes, EMPTY_OPERATIONS_ARRAY,
+                    request.relativeStartNanos));
             } catch (final Exception caught) {
                 caught.addSuppressed(e);
                 listener.onFailure(caught);

@@ -459,8 +478,11 @@ public class ShardChangesAction extends Action<ShardChangesAction.Response> {
     }

     static Response getResponse(final long mappingVersion, final SeqNoStats seqNoStats,
-                                final long maxSeqNoOfUpdates, final Translog.Operation[] operations) {
-        return new Response(mappingVersion, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), maxSeqNoOfUpdates, operations);
+                                final long maxSeqNoOfUpdates, final Translog.Operation[] operations, long relativeStartNanos) {
+        long tookInNanos = System.nanoTime() - relativeStartNanos;
+        long tookInMillis = TimeUnit.NANOSECONDS.toMillis(tookInNanos);
+        return new Response(mappingVersion, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), maxSeqNoOfUpdates,
+            operations, tookInMillis);
     }

 }

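The fetch-time accounting threaded through ShardChangesAction above follows a common pattern: capture System.nanoTime() when the request starts (here, on deserialization) and convert the elapsed nanos to millis when building the response. A runnable sketch of just that measurement, with a sleep standing in for the actual translog read:

--------------------------------------------------
import java.util.concurrent.TimeUnit;

public class TookTimeSketch {
    public static void main(String[] args) throws InterruptedException {
        long relativeStartNanos = System.nanoTime(); // set in Request.readFrom in the diff
        Thread.sleep(25); // pretend we fetched operations from the leader shard
        long tookInNanos = System.nanoTime() - relativeStartNanos;
        long tookInMillis = TimeUnit.NANOSECONDS.toMillis(tookInNanos);
        System.out.println("fetch took " + tookInMillis + " ms");
    }
}
--------------------------------------------------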
@@ -71,6 +71,7 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask {
     private int numConcurrentReads = 0;
     private int numConcurrentWrites = 0;
     private long currentMappingVersion = 0;
+    private long totalFetchTookTimeMillis = 0;
     private long totalFetchTimeMillis = 0;
     private long numberOfSuccessfulFetches = 0;
     private long numberOfFailedFetches = 0;

@@ -238,6 +239,7 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask {
         fetchExceptions.remove(from);
         if (response.getOperations().length > 0) {
             // do not count polls against fetch stats
+            totalFetchTookTimeMillis += response.getTookInMillis();
             totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime);
             numberOfSuccessfulFetches++;
             operationsReceived += response.getOperations().length;

@@ -449,6 +451,7 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask {
             buffer.size(),
             currentMappingVersion,
             totalFetchTimeMillis,
+            totalFetchTookTimeMillis,
             numberOfSuccessfulFetches,
             numberOfFailedFetches,
             operationsReceived,

@@ -54,7 +54,7 @@ public class TransportDeleteAutoFollowPatternAction extends
     protected void masterOperation(DeleteAutoFollowPatternAction.Request request,
                                    ClusterState state,
                                    ActionListener<AcknowledgedResponse> listener) throws Exception {
-        clusterService.submitStateUpdateTask("put-auto-follow-pattern-" + request.getLeaderCluster(),
+        clusterService.submitStateUpdateTask("put-auto-follow-pattern-" + request.getName(),
             new AckedClusterStateUpdateTask<AcknowledgedResponse>(request, listener) {

                 @Override

@@ -72,23 +72,23 @@ public class TransportDeleteAutoFollowPatternAction extends
     static ClusterState innerDelete(DeleteAutoFollowPatternAction.Request request, ClusterState currentState) {
         AutoFollowMetadata currentAutoFollowMetadata = currentState.metaData().custom(AutoFollowMetadata.TYPE);
         if (currentAutoFollowMetadata == null) {
-            throw new ResourceNotFoundException("no auto-follow patterns for cluster alias [{}] found",
-                request.getLeaderCluster());
+            throw new ResourceNotFoundException("auto-follow pattern [{}] is missing",
+                request.getName());
         }
         Map<String, AutoFollowPattern> patterns = currentAutoFollowMetadata.getPatterns();
-        AutoFollowPattern autoFollowPatternToRemove = patterns.get(request.getLeaderCluster());
+        AutoFollowPattern autoFollowPatternToRemove = patterns.get(request.getName());
         if (autoFollowPatternToRemove == null) {
-            throw new ResourceNotFoundException("no auto-follow patterns for cluster alias [{}] found",
-                request.getLeaderCluster());
+            throw new ResourceNotFoundException("auto-follow pattern [{}] is missing",
+                request.getName());
         }

         final Map<String, AutoFollowPattern> patternsCopy = new HashMap<>(patterns);
         final Map<String, List<String>> followedLeaderIndexUUIDSCopy =
             new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs());
         final Map<String, Map<String, String>> headers = new HashMap<>(currentAutoFollowMetadata.getHeaders());
-        patternsCopy.remove(request.getLeaderCluster());
-        followedLeaderIndexUUIDSCopy.remove(request.getLeaderCluster());
-        headers.remove(request.getLeaderCluster());
+        patternsCopy.remove(request.getName());
+        followedLeaderIndexUUIDSCopy.remove(request.getName());
+        headers.remove(request.getName());

         AutoFollowMetadata newAutoFollowMetadata = new AutoFollowMetadata(patternsCopy, followedLeaderIndexUUIDSCopy, headers);
         ClusterState.Builder newState = ClusterState.builder(currentState);

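All three AutoFollowMetadata maps (patterns, followed index UUIDs, headers) are now keyed by pattern name, so innerDelete above removes the same key from copies of each. A simplified sketch of that copy-and-remove shape with plain JDK maps and made-up data; the real code throws ResourceNotFoundException and rebuilds cluster state rather than printing.

--------------------------------------------------
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DeletePatternSketch {
    public static void main(String[] args) {
        Map<String, String> patterns = new HashMap<>(); // pattern name -> leader cluster
        Map<String, List<String>> followedUuids = new HashMap<>();
        Map<String, Map<String, String>> headers = new HashMap<>();
        patterns.put("name1", "eu_cluster");
        followedUuids.put("name1", Collections.singletonList("_val"));
        headers.put("name1", Collections.singletonMap("key", "val"));

        String name = "name1";
        if (patterns.containsKey(name) == false) {
            // Stand-in for ResourceNotFoundException("auto-follow pattern [{}] is missing", name)
            throw new IllegalArgumentException("auto-follow pattern [" + name + "] is missing");
        }
        // Copy-on-write, since cluster state metadata is immutable in practice:
        Map<String, String> patternsCopy = new HashMap<>(patterns);
        Map<String, List<String>> uuidsCopy = new HashMap<>(followedUuids);
        Map<String, Map<String, String>> headersCopy = new HashMap<>(headers);
        patternsCopy.remove(name);
        uuidsCopy.remove(name);
        headersCopy.remove(name);
        System.out.println("remaining patterns: " + patternsCopy.keySet());
    }
}
--------------------------------------------------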
@@ -55,7 +55,7 @@ public class TransportGetAutoFollowPatternAction
     protected void masterOperation(GetAutoFollowPatternAction.Request request,
                                    ClusterState state,
                                    ActionListener<GetAutoFollowPatternAction.Response> listener) throws Exception {
-        Map<String, AutoFollowPattern> autoFollowPatterns = getAutoFollowPattern(state.metaData(), request.getLeaderCluster());
+        Map<String, AutoFollowPattern> autoFollowPatterns = getAutoFollowPattern(state.metaData(), request.getName());
         listener.onResponse(new GetAutoFollowPatternAction.Response(autoFollowPatterns));
     }

@@ -64,20 +64,20 @@ public class TransportGetAutoFollowPatternAction
         return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
     }

-    static Map<String, AutoFollowPattern> getAutoFollowPattern(MetaData metaData, String leaderClusterAlias) {
+    static Map<String, AutoFollowPattern> getAutoFollowPattern(MetaData metaData, String name) {
         AutoFollowMetadata autoFollowMetadata = metaData.custom(AutoFollowMetadata.TYPE);
         if (autoFollowMetadata == null) {
-            throw new ResourceNotFoundException("no auto-follow patterns for cluster alias [{}] found", leaderClusterAlias);
+            throw new ResourceNotFoundException("auto-follow pattern [{}] is missing", name);
         }

-        if (leaderClusterAlias == null) {
+        if (name == null) {
             return autoFollowMetadata.getPatterns();
         }

-        AutoFollowPattern autoFollowPattern = autoFollowMetadata.getPatterns().get(leaderClusterAlias);
+        AutoFollowPattern autoFollowPattern = autoFollowMetadata.getPatterns().get(name);
         if (autoFollowPattern == null) {
-            throw new ResourceNotFoundException("no auto-follow patterns for cluster alias [{}] found", leaderClusterAlias);
+            throw new ResourceNotFoundException("auto-follow pattern [{}] is missing", name);
         }
-        return Collections.singletonMap(leaderClusterAlias, autoFollowPattern);
+        return Collections.singletonMap(name, autoFollowPattern);
     }
 }

@@ -135,14 +135,14 @@ public class TransportPutAutoFollowPatternAction extends
             headers = new HashMap<>();
         }

-        AutoFollowPattern previousPattern = patterns.get(request.getLeaderCluster());
+        AutoFollowPattern previousPattern = patterns.get(request.getName());
         final List<String> followedIndexUUIDs;
-        if (followedLeaderIndices.containsKey(request.getLeaderCluster())) {
-            followedIndexUUIDs = new ArrayList<>(followedLeaderIndices.get(request.getLeaderCluster()));
+        if (followedLeaderIndices.containsKey(request.getName())) {
+            followedIndexUUIDs = new ArrayList<>(followedLeaderIndices.get(request.getName()));
         } else {
             followedIndexUUIDs = new ArrayList<>();
         }
-        followedLeaderIndices.put(request.getLeaderCluster(), followedIndexUUIDs);
+        followedLeaderIndices.put(request.getName(), followedIndexUUIDs);
         // Mark existing leader indices as already auto followed:
         if (previousPattern != null) {
             markExistingIndicesAsAutoFollowedForNewPatterns(request.getLeaderIndexPatterns(), leaderClusterState.metaData(),

@@ -153,10 +153,11 @@ public class TransportPutAutoFollowPatternAction extends
         }

         if (filteredHeaders != null) {
-            headers.put(request.getLeaderCluster(), filteredHeaders);
+            headers.put(request.getName(), filteredHeaders);
         }

         AutoFollowPattern autoFollowPattern = new AutoFollowPattern(
+            request.getLeaderCluster(),
             request.getLeaderIndexPatterns(),
             request.getFollowIndexNamePattern(),
             request.getMaxBatchOperationCount(),

@@ -166,7 +167,7 @@ public class TransportPutAutoFollowPatternAction extends
             request.getMaxWriteBufferSize(),
             request.getMaxRetryDelay(),
             request.getPollTimeout());
-        patterns.put(request.getLeaderCluster(), autoFollowPattern);
+        patterns.put(request.getName(), autoFollowPattern);
         ClusterState.Builder newState = ClusterState.builder(localState);
         newState.metaData(MetaData.builder(localState.getMetaData())
             .putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, followedLeaderIndices, headers))

@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.license.LicenseUtils;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -168,6 +169,7 @@ public final class TransportPutFollowAction
         settingsBuilder.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID());
         settingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followIndex);
         settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true);
+        settingsBuilder.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true);
         imdBuilder.settings(settingsBuilder);

         // Copy mappings from leader IMD to follow IMD

@@ -240,6 +240,9 @@ public class TransportResumeFollowAction extends HandledTransportAction<ResumeFo
         if (leaderIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) {
             throw new IllegalArgumentException("leader index [" + leaderIndexName + "] does not have soft deletes enabled");
         }
+        if (followIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) {
+            throw new IllegalArgumentException("follower index [" + request.getFollowerIndex() + "] does not have soft deletes enabled");
+        }
         if (leaderIndex.getNumberOfShards() != followIndex.getNumberOfShards()) {
             throw new IllegalArgumentException("leader index primary shards [" + leaderIndex.getNumberOfShards() +
                 "] does not match with the number of shards of the follow index [" + followIndex.getNumberOfShards() + "]");

@@ -382,7 +385,6 @@ public class TransportResumeFollowAction extends HandledTransportAction<ResumeFo
         whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING);
         whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING);

-        whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_SETTING);
         whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING);

         WHITE_LISTED_SETTINGS = Collections.unmodifiableSet(whiteListedSettings);

@@ -49,6 +49,9 @@ public final class FollowingEngine extends InternalEngine {
         if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(engineConfig.getIndexSettings().getSettings()) == false) {
             throw new IllegalArgumentException("a following engine can not be constructed for a non-following index");
         }
+        if (engineConfig.getIndexSettings().isSoftDeleteEnabled() == false) {
+            throw new IllegalArgumentException("a following engine requires soft deletes to be enabled");
+        }
         return engineConfig;
     }

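Both TransportResumeFollowAction and FollowingEngine above now insist on soft deletes on both sides of CCR. A minimal sketch of the same getAsBoolean check; the setting key is the real index.soft_deletes.enabled key, while the Settings objects and index names are illustrative.

--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class SoftDeletesCheckSketch {
    public static void main(String[] args) {
        Settings leader = Settings.builder().put("index.soft_deletes.enabled", true).build();
        Settings follower = Settings.builder().put("index.soft_deletes.enabled", false).build();
        // Defaults to false when unset, so an index created without the setting fails the check.
        if (leader.getAsBoolean("index.soft_deletes.enabled", false) == false) {
            throw new IllegalArgumentException("leader index [logs-eu] does not have soft deletes enabled");
        }
        if (follower.getAsBoolean("index.soft_deletes.enabled", false) == false) {
            throw new IllegalArgumentException("follower index [copy-logs-eu] does not have soft deletes enabled");
        }
    }
}
--------------------------------------------------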
@@ -21,7 +21,7 @@ public class RestDeleteAutoFollowPatternAction extends BaseRestHandler {

     public RestDeleteAutoFollowPatternAction(Settings settings, RestController controller) {
         super(settings);
-        controller.registerHandler(RestRequest.Method.DELETE, "/_ccr/auto_follow/{leader_cluster}", this);
+        controller.registerHandler(RestRequest.Method.DELETE, "/_ccr/auto_follow/{name}", this);
     }

     @Override

@@ -32,7 +32,7 @@ public class RestDeleteAutoFollowPatternAction extends BaseRestHandler {
     @Override
     protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
         Request request = new Request();
-        request.setLeaderCluster(restRequest.param("leader_cluster"));
+        request.setName(restRequest.param("name"));
         return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel));
     }

@@ -21,7 +21,7 @@ public class RestGetAutoFollowPatternAction extends BaseRestHandler {

     public RestGetAutoFollowPatternAction(Settings settings, RestController controller) {
         super(settings);
-        controller.registerHandler(RestRequest.Method.GET, "/_ccr/auto_follow/{leader_cluster}", this);
+        controller.registerHandler(RestRequest.Method.GET, "/_ccr/auto_follow/{name}", this);
         controller.registerHandler(RestRequest.Method.GET, "/_ccr/auto_follow", this);
     }

@@ -33,7 +33,7 @@ public class RestGetAutoFollowPatternAction extends BaseRestHandler {
     @Override
     protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
         Request request = new Request();
-        request.setLeaderCluster(restRequest.param("leader_cluster"));
+        request.setName(restRequest.param("name"));
         return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel));
     }

@@ -22,7 +22,7 @@ public class RestPutAutoFollowPatternAction extends BaseRestHandler {

     public RestPutAutoFollowPatternAction(Settings settings, RestController controller) {
         super(settings);
-        controller.registerHandler(RestRequest.Method.PUT, "/_ccr/auto_follow/{leader_cluster}", this);
+        controller.registerHandler(RestRequest.Method.PUT, "/_ccr/auto_follow/{name}", this);
     }

     @Override

@@ -38,7 +38,7 @@ public class RestPutAutoFollowPatternAction extends BaseRestHandler {

     static Request createRequest(RestRequest restRequest) throws IOException {
         try (XContentParser parser = restRequest.contentOrSourceParamParser()) {
-            return Request.fromXContent(parser, restRequest.param("leader_cluster"));
+            return Request.fromXContent(parser, restRequest.param("name"));
         }
     }
 }

@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.ccr;

+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest;
 import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -26,6 +27,7 @@ import java.util.Arrays;
 import java.util.Collections;

 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.notNullValue;

 public class AutoFollowIT extends CcrIntegTestCase {

@@ -45,7 +47,12 @@ public class AutoFollowIT extends CcrIntegTestCase {
         createLeaderIndex("logs-201812", leaderIndexSettings);

         // Enabling auto following:
-        putAutoFollowPatterns("logs-*", "transactions-*");
+        if (randomBoolean()) {
+            putAutoFollowPatterns("my-pattern", new String[] {"logs-*", "transactions-*"});
+        } else {
+            putAutoFollowPatterns("my-pattern1", new String[] {"logs-*"});
+            putAutoFollowPatterns("my-pattern2", new String[] {"transactions-*"});
+        }

         createLeaderIndex("metrics-201901", leaderIndexSettings);

@@ -76,7 +83,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
             .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
             .build();

-        putAutoFollowPatterns("logs-*");
+        putAutoFollowPatterns("my-pattern", new String[] {"logs-*"});
         int numIndices = randomIntBetween(4, 32);
         for (int i = 0; i < numIndices; i++) {
             createLeaderIndex("logs-" + i, leaderIndexSettings);

@@ -90,7 +97,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
         deleteAutoFollowPatternSetting();
         createLeaderIndex("logs-does-not-count", leaderIndexSettings);

-        putAutoFollowPatterns("logs-*");
+        putAutoFollowPatterns("my-pattern", new String[] {"logs-*"});
         int i = numIndices;
         numIndices = numIndices + randomIntBetween(4, 32);
         for (; i < numIndices; i++) {

@@ -113,6 +120,7 @@ public class AutoFollowIT extends CcrIntegTestCase {

         // Enabling auto following:
         PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
+        request.setName("my-pattern");
         request.setLeaderCluster("leader_cluster");
         request.setLeaderIndexPatterns(Collections.singletonList("logs-*"));
         // Need to set this, because following an index in the same cluster

@@ -173,8 +181,53 @@ public class AutoFollowIT extends CcrIntegTestCase {
         });
     }

-    private void putAutoFollowPatterns(String... patterns) {
+    public void testConflictingPatterns() throws Exception {
+        Settings leaderIndexSettings = Settings.builder()
+            .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
+            .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
+            .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
+            .build();
+
+        // Enabling auto following:
+        putAutoFollowPatterns("my-pattern1", new String[] {"logs-*"});
+        putAutoFollowPatterns("my-pattern2", new String[] {"logs-2018*"});
+
+        createLeaderIndex("logs-201701", leaderIndexSettings);
+        assertBusy(() -> {
+            AutoFollowStats autoFollowStats = getAutoFollowStats();
+            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(1L));
+            assertThat(autoFollowStats.getNumberOfFailedFollowIndices(), equalTo(0L));
+            assertThat(autoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), equalTo(0L));
+        });
+        IndicesExistsRequest request = new IndicesExistsRequest("copy-logs-201701");
+        assertTrue(followerClient().admin().indices().exists(request).actionGet().isExists());
+
+        createLeaderIndex("logs-201801", leaderIndexSettings);
+        assertBusy(() -> {
+            AutoFollowStats autoFollowStats = getAutoFollowStats();
+            assertThat(autoFollowStats.getNumberOfSuccessfulFollowIndices(), equalTo(1L));
+            assertThat(autoFollowStats.getNumberOfFailedFollowIndices(), greaterThanOrEqualTo(1L));
+            assertThat(autoFollowStats.getNumberOfFailedRemoteClusterStateRequests(), equalTo(0L));
+
+            assertThat(autoFollowStats.getRecentAutoFollowErrors().size(), equalTo(2));
+            ElasticsearchException autoFollowError1 = autoFollowStats.getRecentAutoFollowErrors().get("my-pattern1:logs-201801");
+            assertThat(autoFollowError1, notNullValue());
+            assertThat(autoFollowError1.getRootCause().getMessage(), equalTo("index to follow [logs-201801] for pattern [my-pattern1] " +
+                "matches with other patterns [my-pattern2]"));
+
+            ElasticsearchException autoFollowError2 = autoFollowStats.getRecentAutoFollowErrors().get("my-pattern2:logs-201801");
+            assertThat(autoFollowError2, notNullValue());
+            assertThat(autoFollowError2.getRootCause().getMessage(), equalTo("index to follow [logs-201801] for pattern [my-pattern2] " +
+                "matches with other patterns [my-pattern1]"));
+        });
+
+        request = new IndicesExistsRequest("copy-logs-201801");
+        assertFalse(followerClient().admin().indices().exists(request).actionGet().isExists());
+    }
+
+    private void putAutoFollowPatterns(String name, String[] patterns) {
         PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
+        request.setName(name);
         request.setLeaderCluster("leader_cluster");
         request.setLeaderIndexPatterns(Arrays.asList(patterns));
         // Need to set this, because following an index in the same cluster

@@ -184,7 +237,7 @@ public class AutoFollowIT extends CcrIntegTestCase {

     private void deleteAutoFollowPatternSetting() {
         DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request();
-        request.setLeaderCluster("leader_cluster");
+        request.setName("my-pattern");
         assertTrue(followerClient().execute(DeleteAutoFollowPatternAction.INSTANCE, request).actionGet().isAcknowledged());
     }

@@ -41,6 +41,7 @@ public class AutoFollowMetadataTests extends AbstractSerializingTestCase<AutoFol
         for (int i = 0; i < numEntries; i++) {
             List<String> leaderPatterns = Arrays.asList(generateRandomStringArray(4, 4, false));
             AutoFollowMetadata.AutoFollowPattern autoFollowPattern = new AutoFollowMetadata.AutoFollowPattern(
+                randomAlphaOfLength(4),
                 leaderPatterns,
                 randomAlphaOfLength(4),
                 randomIntBetween(0, Integer.MAX_VALUE),

@@ -118,6 +118,7 @@ public class CcrLicenseIT extends CcrSingleNodeTestCase {
     public void testThatPutAutoFollowPatternsIsUnavailableWithNonCompliantLicense() throws InterruptedException {
         final CountDownLatch latch = new CountDownLatch(1);
         final PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
+        request.setName("name");
         request.setLeaderCluster("leader");
         request.setLeaderIndexPatterns(Collections.singletonList("*"));
         client().execute(

@@ -147,8 +148,8 @@ public class CcrLicenseIT extends CcrSingleNodeTestCase {

             @Override
             public ClusterState execute(ClusterState currentState) throws Exception {
-                AutoFollowPattern autoFollowPattern =
-                    new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
+                AutoFollowPattern autoFollowPattern = new AutoFollowPattern("test_alias", Collections.singletonList("logs-*"),
+                    null, null, null, null, null, null, null, null);
                 AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(
                     Collections.singletonMap("test_alias", autoFollowPattern),
                     Collections.emptyMap(),

@@ -683,6 +683,7 @@ public class IndexFollowingIT extends CcrIntegTestCase {
             () -> followerClient().execute(ResumeFollowAction.INSTANCE, followRequest.getFollowRequest()).actionGet());
         assertThat(e.getMessage(), equalTo("unknown cluster alias [another_cluster]"));
         PutAutoFollowPatternAction.Request putAutoFollowRequest = new PutAutoFollowPatternAction.Request();
+        putAutoFollowRequest.setName("name");
         putAutoFollowRequest.setLeaderCluster("another_cluster");
         putAutoFollowRequest.setLeaderIndexPatterns(Collections.singletonList("logs-*"));
         e = expectThrows(IllegalArgumentException.class,

@@ -56,7 +56,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
             .build();

         AutoFollowPattern autoFollowPattern =
-            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
+            new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
         Map<String, AutoFollowPattern> patterns = new HashMap<>();
         patterns.put("remote", autoFollowPattern);
         Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();

@@ -120,7 +120,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
         when(client.getRemoteClusterClient(anyString())).thenReturn(client);

         AutoFollowPattern autoFollowPattern =
-            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
+            new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
         Map<String, AutoFollowPattern> patterns = new HashMap<>();
         patterns.put("remote", autoFollowPattern);
         Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();

@@ -178,7 +178,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
             .build();

         AutoFollowPattern autoFollowPattern =
-            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
+            new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
         Map<String, AutoFollowPattern> patterns = new HashMap<>();
         patterns.put("remote", autoFollowPattern);
         Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();

@@ -241,7 +241,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
             .build();

         AutoFollowPattern autoFollowPattern =
-            new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
+            new AutoFollowPattern("remote", Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null);
         Map<String, AutoFollowPattern> patterns = new HashMap<>();
         patterns.put("remote", autoFollowPattern);
         Map<String, List<String>> followedLeaderIndexUUIDS = new HashMap<>();

@@ -295,7 +295,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase {

     public void testGetLeaderIndicesToFollow() {
         AutoFollowPattern autoFollowPattern =
-            new AutoFollowPattern(Collections.singletonList("metrics-*"), null, null, null, null, null, null, null, null);
+            new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), null, null, null, null, null, null, null, null);
         Map<String, Map<String, String>> headers = new HashMap<>();
         ClusterState followerState = ClusterState.builder(new ClusterName("remote"))
             .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE,

@@ -342,15 +342,15 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
     }

     public void testGetFollowerIndexName() {
-        AutoFollowPattern autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), null, null,
+        AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), null, null,
             null, null, null, null, null, null);
         assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("metrics-0"));

-        autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), "eu-metrics-0", null, null,
+        autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), "eu-metrics-0", null, null,
             null, null, null, null, null);
         assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0"));

-        autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), "eu-{{leader_index}}", null,
+        autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), "eu-{{leader_index}}", null,
             null, null, null, null, null, null);
         assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0"));
     }

@@ -18,7 +18,7 @@ public class DeleteAutoFollowPatternRequestTests extends AbstractStreamableTestC
     @Override
     protected DeleteAutoFollowPatternAction.Request createTestInstance() {
         DeleteAutoFollowPatternAction.Request request = new DeleteAutoFollowPatternAction.Request();
-        request.setLeaderCluster(randomAlphaOfLength(4));
+        request.setName(randomAlphaOfLength(4));
         return request;
     }
 }

@@ -20,7 +20,7 @@ public class GetAutoFollowPatternRequestTests extends AbstractWireSerializingTes
     protected GetAutoFollowPatternAction.Request createTestInstance() {
         GetAutoFollowPatternAction.Request request = new GetAutoFollowPatternAction.Request();
         if (randomBoolean()) {
-            request.setLeaderCluster(randomAlphaOfLength(4));
+            request.setName(randomAlphaOfLength(4));
         }
         return request;
     }

@@ -29,6 +29,7 @@ public class GetAutoFollowPatternResponseTests extends AbstractStreamableTestCas
         Map<String, AutoFollowPattern> patterns = new HashMap<>(numPatterns);
         for (int i = 0; i < numPatterns; i++) {
             AutoFollowPattern autoFollowPattern = new AutoFollowPattern(
+                "remote",
                 Collections.singletonList(randomAlphaOfLength(4)),
                 randomAlphaOfLength(4),
                 randomIntBetween(0, Integer.MAX_VALUE),

@@ -41,6 +41,7 @@ public class PutAutoFollowPatternRequestTests extends AbstractStreamableXContent
     @Override
     protected PutAutoFollowPatternAction.Request createTestInstance() {
         PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
+        request.setName(randomAlphaOfLength(4));
         request.setLeaderCluster(randomAlphaOfLength(4));
         request.setLeaderIndexPatterns(Arrays.asList(generateRandomStringArray(4, 4, false)));
         if (randomBoolean()) {

@@ -74,6 +75,11 @@ public class PutAutoFollowPatternRequestTests extends AbstractStreamableXContent
         PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
         ActionRequestValidationException validationException = request.validate();
         assertThat(validationException, notNullValue());
+        assertThat(validationException.getMessage(), containsString("[name] is missing"));
+
+        request.setName("name");
+        validationException = request.validate();
+        assertThat(validationException, notNullValue());
         assertThat(validationException.getMessage(), containsString("[leader_cluster] is missing"));

         request.setLeaderCluster("_alias");

@@ -26,7 +26,8 @@ public class ShardChangesResponseTests extends AbstractStreamableTestCase<ShardC
             leaderGlobalCheckpoint,
             leaderMaxSeqNo,
             maxSeqNoOfUpdatesOrDeletes,
-            operations
+            operations,
+            randomNonNegativeLong()
         );
     }

@@ -158,7 +158,7 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase {
                 final long globalCheckpoint = tracker.getCheckpoint();
                 final long maxSeqNo = tracker.getMaxSeqNo();
                 handler.accept(new ShardChangesAction.Response(
-                    0L, globalCheckpoint, maxSeqNo, randomNonNegativeLong(), new Translog.Operation[0]));
+                    0L, globalCheckpoint, maxSeqNo, randomNonNegativeLong(), new Translog.Operation[0], 1L));
             }
         };
         threadPool.generic().execute(task);

@@ -233,7 +233,8 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase {
                         nextGlobalCheckPoint,
                         nextGlobalCheckPoint,
                         randomNonNegativeLong(),
-                        ops.toArray(EMPTY))
+                        ops.toArray(EMPTY),
+                        randomNonNegativeLong())
                     )
                 );
                 responses.put(prevGlobalCheckpoint, item);

@@ -256,7 +257,8 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase {
                     prevGlobalCheckpoint,
                     prevGlobalCheckpoint,
                     randomNonNegativeLong(),
-                    EMPTY
+                    EMPTY,
+                    randomNonNegativeLong()
                 );
                 item.add(new TestResponse(null, mappingVersion, response));
             }

@@ -273,7 +275,8 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase {
                     localLeaderGCP,
                     localLeaderGCP,
                     randomNonNegativeLong(),
-                    ops.toArray(EMPTY)
+                    ops.toArray(EMPTY),
+                    randomNonNegativeLong()
                 );
                 item.add(new TestResponse(null, mappingVersion, response));
                 responses.put(fromSeqNo, Collections.unmodifiableList(item));

@@ -56,6 +56,7 @@ public class ShardFollowNodeTaskStatusTests extends AbstractSerializingTestCase<
             randomNonNegativeLong(),
             randomNonNegativeLong(),
             randomNonNegativeLong(),
+            randomNonNegativeLong(),
             randomReadExceptions(),
             randomLong(),
             randomBoolean() ? new ElasticsearchException("fatal error") : null);

@@ -439,7 +439,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase {
         assertThat(shardChangesRequests.get(0)[1], equalTo(64L));

         shardChangesRequests.clear();
-        task.innerHandleReadResponse(0L, 63L, new ShardChangesAction.Response(0, 0, 0, 100, new Translog.Operation[0]));
+        task.innerHandleReadResponse(0L, 63L, new ShardChangesAction.Response(0, 0, 0, 100, new Translog.Operation[0], 1L));

         assertThat(shardChangesRequests.size(), equalTo(1));
         assertThat(shardChangesRequests.get(0)[0], equalTo(0L));

@@ -782,7 +782,8 @@ public class ShardFollowNodeTaskTests extends ESTestCase {
                 leaderGlobalCheckpoints.poll(),
                 maxSeqNos.poll(),
                 randomNonNegativeLong(),
-                operations
+                operations,
+                1L
             );
             handler.accept(response);
         }

@@ -813,7 +814,8 @@ public class ShardFollowNodeTaskTests extends ESTestCase {
                 leaderGlobalCheckPoint,
                 leaderGlobalCheckPoint,
                 randomNonNegativeLong(),
-                ops.toArray(new Translog.Operation[0])
+                ops.toArray(new Translog.Operation[0]),
+                1L
             );
         }

@@ -429,7 +429,7 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest
         final long maxSeqNoOfUpdatesOrDeletes = indexShard.getMaxSeqNoOfUpdatesOrDeletes();
         if (from > seqNoStats.getGlobalCheckpoint()) {
             handler.accept(ShardChangesAction.getResponse(1L, seqNoStats,
-                maxSeqNoOfUpdatesOrDeletes, ShardChangesAction.EMPTY_OPERATIONS_ARRAY));
+                maxSeqNoOfUpdatesOrDeletes, ShardChangesAction.EMPTY_OPERATIONS_ARRAY, 1L));
             return;
         }
         Translog.Operation[] ops = ShardChangesAction.getOperations(indexShard, seqNoStats.getGlobalCheckpoint(), from,

@@ -440,7 +440,8 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest
             seqNoStats.getGlobalCheckpoint(),
             seqNoStats.getMaxSeqNo(),
             maxSeqNoOfUpdatesOrDeletes,
-            ops
+            ops,
+            1L
         );
         handler.accept(response);
         return;

@@ -49,6 +49,7 @@ public class StatsResponsesTests extends AbstractStreamableTestCase<FollowStatsA
             randomNonNegativeLong(),
             randomNonNegativeLong(),
             randomNonNegativeLong(),
+            randomNonNegativeLong(),
             Collections.emptyNavigableMap(),
             randomLong(),
             randomBoolean() ? new ElasticsearchException("fatal error") : null);

@@ -10,6 +10,7 @@ import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern;
 import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction.Request;
 import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;

@@ -27,28 +28,28 @@ public class TransportDeleteAutoFollowPatternActionTests extends ESTestCase {
     public void testInnerDelete() {
         Map<String, List<String>> existingAlreadyFollowedIndexUUIDS = new HashMap<>();
         Map<String, Map<String, String>> existingHeaders = new HashMap<>();
-        Map<String, AutoFollowMetadata.AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
+        Map<String, AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
         {
             List<String> existingPatterns = new ArrayList<>();
             existingPatterns.add("transactions-*");
-            existingAutoFollowPatterns.put("eu_cluster",
-                new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null));
+            existingAutoFollowPatterns.put("name1",
+                new AutoFollowPattern("eu_cluster", existingPatterns, null, null, null, null, null, null, null, null));

             List<String> existingUUIDS = new ArrayList<>();
             existingUUIDS.add("_val");
-            existingAlreadyFollowedIndexUUIDS.put("eu_cluster", existingUUIDS);
-            existingHeaders.put("eu_cluster", Collections.singletonMap("key", "val"));
+            existingAlreadyFollowedIndexUUIDS.put("name1", existingUUIDS);
+            existingHeaders.put("name1", Collections.singletonMap("key", "val"));
         }
         {
             List<String> existingPatterns = new ArrayList<>();
             existingPatterns.add("logs-*");
-            existingAutoFollowPatterns.put("asia_cluster",
-                new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null));
+            existingAutoFollowPatterns.put("name2",
+                new AutoFollowPattern("asia_cluster", existingPatterns, null, null, null, null, null, null, null, null));

             List<String> existingUUIDS = new ArrayList<>();
             existingUUIDS.add("_val");
-            existingAlreadyFollowedIndexUUIDS.put("asia_cluster", existingUUIDS);
-            existingHeaders.put("asia_cluster", Collections.singletonMap("key", "val"));
+            existingAlreadyFollowedIndexUUIDS.put("name2", existingUUIDS);
+            existingHeaders.put("name2", Collections.singletonMap("key", "val"));
         }
         ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster"))
             .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE,

@@ -56,27 +57,28 @@ public class TransportDeleteAutoFollowPatternActionTests extends ESTestCase {
             .build();

         Request request = new Request();
-        request.setLeaderCluster("eu_cluster");
+        request.setName("name1");
         AutoFollowMetadata result = TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState)
             .getMetaData()
             .custom(AutoFollowMetadata.TYPE);
         assertThat(result.getPatterns().size(), equalTo(1));
-        assertThat(result.getPatterns().get("asia_cluster"), notNullValue());
+        assertThat(result.getPatterns().get("name2"), notNullValue());
+        assertThat(result.getPatterns().get("name2").getLeaderCluster(), equalTo("asia_cluster"));
         assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
-        assertThat(result.getFollowedLeaderIndexUUIDs().get("asia_cluster"), notNullValue());
+        assertThat(result.getFollowedLeaderIndexUUIDs().get("name2"), notNullValue());
         assertThat(result.getHeaders().size(), equalTo(1));
-        assertThat(result.getHeaders().get("asia_cluster"), notNullValue());
+        assertThat(result.getHeaders().get("name2"), notNullValue());
     }

     public void testInnerDeleteDoesNotExist() {
         Map<String, List<String>> existingAlreadyFollowedIndexUUIDS = new HashMap<>();
-        Map<String, AutoFollowMetadata.AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
+        Map<String, AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
         Map<String, Map<String, String>> existingHeaders = new HashMap<>();
         {
             List<String> existingPatterns = new ArrayList<>();
             existingPatterns.add("transactions-*");
-            existingAutoFollowPatterns.put("eu_cluster",
-                new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null));
+            existingAutoFollowPatterns.put("name1",
+                new AutoFollowPattern("eu_cluster", existingPatterns, null, null, null, null, null, null, null, null));
             existingHeaders.put("key", Collections.singletonMap("key", "val"));
         }
         ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster"))

@@ -85,10 +87,10 @@ public class TransportDeleteAutoFollowPatternActionTests extends ESTestCase {
             .build();

         Request request = new Request();
-        request.setLeaderCluster("asia_cluster");
+        request.setName("name2");
         Exception e = expectThrows(ResourceNotFoundException.class,
             () -> TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState));
-        assertThat(e.getMessage(), equalTo("no auto-follow patterns for cluster alias [asia_cluster] found"));
+        assertThat(e.getMessage(), equalTo("auto-follow pattern [name2] is missing"));
     }

     public void testInnerDeleteNoAutoFollowMetadata() {

@@ -97,10 +99,10 @@ public class TransportDeleteAutoFollowPatternActionTests extends ESTestCase {
             .build();

         Request request = new Request();
-        request.setLeaderCluster("asia_cluster");
+        request.setName("name1");
         Exception e = expectThrows(ResourceNotFoundException.class,
             () -> TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState));
-        assertThat(e.getMessage(), equalTo("no auto-follow patterns for cluster alias [asia_cluster] found"));
+        assertThat(e.getMessage(), equalTo("auto-follow pattern [name1] is missing"));
     }

 }

@ -23,22 +23,22 @@ public class TransportGetAutoFollowPatternActionTests extends ESTestCase {
|
|||
|
||||
public void testGetAutoFollowPattern() {
|
||||
Map<String, AutoFollowPattern> patterns = new HashMap<>();
|
||||
patterns.put("test_alias1",
|
||||
new AutoFollowPattern(Collections.singletonList("index-*"), null, null, null, null, null, null, null, null));
|
||||
patterns.put("test_alias2",
|
||||
new AutoFollowPattern(Collections.singletonList("index-*"), null, null, null, null, null, null, null, null));
|
||||
patterns.put("name1",
|
||||
new AutoFollowPattern("test_alias1", Collections.singletonList("index-*"), null, null, null, null, null, null, null, null));
|
||||
patterns.put("name2",
|
||||
new AutoFollowPattern("test_alias1", Collections.singletonList("index-*"), null, null, null, null, null, null, null, null));
|
||||
MetaData metaData = MetaData.builder()
|
||||
.putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap()))
|
||||
.build();
|
||||
|
||||
Map<String, AutoFollowPattern> result = TransportGetAutoFollowPatternAction.getAutoFollowPattern(metaData, "test_alias1");
|
||||
Map<String, AutoFollowPattern> result = TransportGetAutoFollowPatternAction.getAutoFollowPattern(metaData, "name1");
|
||||
assertThat(result.size(), equalTo(1));
|
||||
assertThat(result, hasEntry("test_alias1", patterns.get("test_alias1")));
|
||||
assertThat(result, hasEntry("name1", patterns.get("name1")));
|
||||
|
||||
result = TransportGetAutoFollowPatternAction.getAutoFollowPattern(metaData, null);
|
||||
assertThat(result.size(), equalTo(2));
|
||||
assertThat(result, hasEntry("test_alias1", patterns.get("test_alias1")));
|
||||
assertThat(result, hasEntry("test_alias2", patterns.get("test_alias2")));
|
||||
assertThat(result, hasEntry("name1", patterns.get("name1")));
|
||||
assertThat(result, hasEntry("name2", patterns.get("name2")));
|
||||
|
||||
expectThrows(ResourceNotFoundException.class,
|
||||
() -> TransportGetAutoFollowPatternAction.getAutoFollowPattern(metaData, "another_alias"));
|
||||
|
@ -51,13 +51,13 @@ public class TransportGetAutoFollowPatternActionTests extends ESTestCase {
|
|||
.putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)
|
||||
.build();
|
||||
expectThrows(ResourceNotFoundException.class,
|
||||
() -> TransportGetAutoFollowPatternAction.getAutoFollowPattern(metaData, "test_alias"));
|
||||
() -> TransportGetAutoFollowPatternAction.getAutoFollowPattern(metaData, "name1"));
|
||||
}
|
||||
|
||||
public void testGetAutoFollowPatternNoAutoFollowMetadata() {
|
||||
MetaData metaData = MetaData.builder().build();
|
||||
expectThrows(ResourceNotFoundException.class,
|
||||
() -> TransportGetAutoFollowPatternAction.getAutoFollowPattern(metaData, "test_alias"));
|
||||
() -> TransportGetAutoFollowPatternAction.getAutoFollowPattern(metaData, "name1"));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
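A minimal sketch of the name-keyed lookup the tests above exercise: patterns are now stored and retrieved by a user-chosen pattern name rather than by the remote cluster alias. Everything here is taken from the test code in the hunks above; it is an illustration, not the authoritative API surface.

----------------------------------------
// Inside a test, with the imports this diff already uses.
Map<String, AutoFollowPattern> patterns = new HashMap<>();
patterns.put("name1",
    new AutoFollowPattern("test_alias1", Collections.singletonList("index-*"),
        null, null, null, null, null, null, null, null));
MetaData metaData = MetaData.builder()
    .putCustom(AutoFollowMetadata.TYPE,
        new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap()))
    .build();
// The lookup key is now the pattern name, not the cluster alias:
Map<String, AutoFollowPattern> result =
    TransportGetAutoFollowPatternAction.getAutoFollowPattern(metaData, "name1");
----------------------------------------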
@@ -12,6 +12,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern;
import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction;

import java.util.ArrayList;

@@ -28,6 +29,7 @@ public class TransportPutAutoFollowPatternActionTests extends ESTestCase {

public void testInnerPut() {
PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
request.setName("name1");
request.setLeaderCluster("eu_cluster");
request.setLeaderIndexPatterns(Collections.singletonList("logs-*"));

@@ -43,14 +45,16 @@ public class TransportPutAutoFollowPatternActionTests extends ESTestCase {
AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE);
assertThat(autoFollowMetadata, notNullValue());
assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1));
assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().size(), equalTo(1));
assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(0), equalTo("logs-*"));
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderCluster(), equalTo("eu_cluster"));
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().size(), equalTo(1));
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().get(0), equalTo("logs-*"));
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("eu_cluster").size(), equalTo(0));
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("name1").size(), equalTo(0));
}

public void testInnerPut_existingLeaderIndices() {
PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
request.setName("name1");
request.setLeaderCluster("eu_cluster");
request.setLeaderIndexPatterns(Collections.singletonList("logs-*"));

@@ -82,28 +86,30 @@ public class TransportPutAutoFollowPatternActionTests extends ESTestCase {
AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE);
assertThat(autoFollowMetadata, notNullValue());
assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1));
assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().size(), equalTo(1));
assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(0), equalTo("logs-*"));
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderCluster(), equalTo("eu_cluster"));
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().size(), equalTo(1));
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().get(0), equalTo("logs-*"));
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("eu_cluster").size(), equalTo(numMatchingLeaderIndices));
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("name1").size(), equalTo(numMatchingLeaderIndices));
}

public void testInnerPut_existingLeaderIndicesAndAutoFollowMetadata() {
PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request();
request.setName("name1");
request.setLeaderCluster("eu_cluster");
request.setLeaderIndexPatterns(Arrays.asList("logs-*", "transactions-*"));

Map<String, AutoFollowMetadata.AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
Map<String, AutoFollowPattern> existingAutoFollowPatterns = new HashMap<>();
List<String> existingPatterns = new ArrayList<>();
existingPatterns.add("transactions-*");
existingAutoFollowPatterns.put("eu_cluster",
new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null));
existingAutoFollowPatterns.put("name1",
new AutoFollowPattern("eu_cluster", existingPatterns, null, null, null, null, null, null, null, null));
Map<String, List<String>> existingAlreadyFollowedIndexUUIDS = new HashMap<>();
List<String> existingUUIDS = new ArrayList<>();
existingUUIDS.add("_val");
existingAlreadyFollowedIndexUUIDS.put("eu_cluster", existingUUIDS);
existingAlreadyFollowedIndexUUIDS.put("name1", existingUUIDS);
Map<String, Map<String, String>> existingHeaders = new HashMap<>();
existingHeaders.put("eu_cluster", Collections.singletonMap("key", "val"));
existingHeaders.put("name1", Collections.singletonMap("key", "val"));

ClusterState localState = ClusterState.builder(new ClusterName("us_cluster"))
.metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE,

@@ -127,13 +133,14 @@ public class TransportPutAutoFollowPatternActionTests extends ESTestCase {
AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE);
assertThat(autoFollowMetadata, notNullValue());
assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1));
assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().size(), equalTo(2));
assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(0), equalTo("logs-*"));
assertThat(autoFollowMetadata.getPatterns().get("eu_cluster").getLeaderIndexPatterns().get(1), equalTo("transactions-*"));
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderCluster(), equalTo("eu_cluster"));
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().size(), equalTo(2));
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().get(0), equalTo("logs-*"));
assertThat(autoFollowMetadata.getPatterns().get("name1").getLeaderIndexPatterns().get(1), equalTo("transactions-*"));
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().size(), equalTo(1));
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("eu_cluster").size(), equalTo(numLeaderIndices + 1));
assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("name1").size(), equalTo(numLeaderIndices + 1));
assertThat(autoFollowMetadata.getHeaders().size(), equalTo(1));
assertThat(autoFollowMetadata.getHeaders().get("eu_cluster"), notNullValue());
assertThat(autoFollowMetadata.getHeaders().get("name1"), notNullValue());
}

}

@@ -85,11 +85,20 @@ public class TransportResumeFollowActionTests extends ESTestCase {
Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null));
assertThat(e.getMessage(), equalTo("leader index [leader_cluster:index1] does not have soft deletes enabled"));
}
{
// should fail because the follower index does not have soft deletes enabled
IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder()
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY, customMetaData);
Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null));
assertThat(e.getMessage(), equalTo("follower index [index2] does not have soft deletes enabled"));
}
{
// should fail because the number of primary shards between leader and follow index are not equal
IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder()
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
IndexMetaData followIMD = createIMD("index2", 4, Settings.EMPTY, customMetaData);
IndexMetaData followIMD = createIMD("index2", 4,
Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), customMetaData);
Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null));
assertThat(e.getMessage(),
equalTo("leader index primary shards [5] does not match with the number of shards of the follow index [4]"));

@@ -98,8 +107,8 @@ public class TransportResumeFollowActionTests extends ESTestCase {
// should fail, because leader index is closed
IndexMetaData leaderIMD = createIMD("index1", State.CLOSE, "{}", 5, Settings.builder()
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
IndexMetaData followIMD = createIMD("index2", State.OPEN, "{}", 5, Settings.builder()
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), customMetaData);
IndexMetaData followIMD = createIMD("index2", State.OPEN, "{}", 5,
Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), customMetaData);
Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null));
assertThat(e.getMessage(), equalTo("leader and follow index must be open"));
}

@@ -107,7 +116,8 @@ public class TransportResumeFollowActionTests extends ESTestCase {
// should fail, because index.xpack.ccr.following_index setting has not been enabled in leader index
IndexMetaData leaderIMD = createIMD("index1", 1,
Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
IndexMetaData followIMD = createIMD("index2", 1, Settings.EMPTY, customMetaData);
IndexMetaData followIMD = createIMD("index2", 1,
Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), customMetaData);
MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2");
mapperService.updateMapping(null, followIMD);
Exception e = expectThrows(IllegalArgumentException.class,

@@ -120,7 +130,8 @@ public class TransportResumeFollowActionTests extends ESTestCase {
IndexMetaData leaderIMD = createIMD("index1", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"keyword\"}}}", 5,
Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
IndexMetaData followIMD = createIMD("index2", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"text\"}}}", 5,
Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build(), customMetaData);
Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build(), customMetaData);
MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2");
mapperService.updateMapping(null, followIMD);
Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, mapperService));

@@ -135,6 +146,7 @@ public class TransportResumeFollowActionTests extends ESTestCase {
.put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace").build(), null);
IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder()
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
.put("index.analysis.analyzer.my_analyzer.type", "custom")
.put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), customMetaData);
Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null));

@@ -144,8 +156,8 @@ public class TransportResumeFollowActionTests extends ESTestCase {
// should fail because the following index does not have the following_index settings
IndexMetaData leaderIMD = createIMD("index1", 5,
Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
Settings followingIndexSettings = randomBoolean() ? Settings.EMPTY :
Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), false).build();
Settings followingIndexSettings = Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), false).build();
IndexMetaData followIMD = createIMD("index2", 5, followingIndexSettings, customMetaData);
MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(),
followingIndexSettings, "index2");

@@ -160,6 +172,7 @@ public class TransportResumeFollowActionTests extends ESTestCase {
IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder()
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
IndexMetaData followIMD = createIMD("index2", 5, Settings.builder()
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build(), customMetaData);
MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2");
mapperService.updateMapping(null, followIMD);

@@ -174,6 +187,7 @@ public class TransportResumeFollowActionTests extends ESTestCase {
.put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), null);
IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder()
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
.put("index.analysis.analyzer.my_analyzer.type", "custom")
.put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), customMetaData);
MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(),

@@ -191,6 +205,7 @@ public class TransportResumeFollowActionTests extends ESTestCase {
.put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), null);
IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder()
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
.put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s")
.put("index.analysis.analyzer.my_analyzer.type", "custom")
.put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), customMetaData);
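The hunks above consistently add the soft-deletes setting to the follower's index settings because validate() now rejects a follower without it. A minimal sketch of follower settings that pass both checks; the setting keys are taken from the diff, the rest is illustrative:

----------------------------------------
Settings followSettings = Settings.builder()
    .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) // required on the follower
    .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)  // marks it as a following index
    .build();
// Without the soft-deletes setting, validate(...) fails with:
// "follower index [index2] does not have soft deletes enabled"
----------------------------------------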
@@ -41,7 +41,8 @@ public class BulkShardOperationsTests extends IndexShardTestCase {

// test that we use the primary term on the follower when applying operations from the leader
public void testPrimaryTermFromFollower() throws IOException {
final Settings settings = Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build();
final Settings settings = Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build();
final IndexShard followerPrimary = newStartedShard(true, settings, new FollowingEngineFactory());

// we use this primary on the operations yet we expect the applied operations to have the primary term of the follower

@@ -14,6 +14,7 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardTestCase;

@@ -31,6 +32,7 @@ public class FollowEngineIndexShardTests extends IndexShardTestCase {
public void testDoNotFillGaps() throws Exception {
Settings settings = Settings.builder()
.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
.build();
final IndexShard indexShard = newStartedShard(false, settings, new FollowingEngineFactory());

@@ -127,6 +127,7 @@ public class FollowingEngineTests extends ESTestCase {
.put("index.number_of_replicas", 0)
.put("index.version.created", Version.CURRENT)
.put("index.xpack.ccr.following_index", true)
.put("index.soft_deletes.enabled", true)
.build();
final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build();
final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);

@@ -152,6 +153,7 @@ public class FollowingEngineTests extends ESTestCase {
.put("index.number_of_replicas", 0)
.put("index.version.created", Version.CURRENT)
.put("index.xpack.ccr.following_index", true)
.put("index.soft_deletes.enabled", true)
.build();
final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build();
final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);

@@ -186,6 +188,7 @@ public class FollowingEngineTests extends ESTestCase {
.put("index.number_of_replicas", 0)
.put("index.version.created", Version.CURRENT)
.put("index.xpack.ccr.following_index", true)
.put("index.soft_deletes.enabled", true)
.build();
final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build();
final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);

@@ -216,6 +219,7 @@ public class FollowingEngineTests extends ESTestCase {
.put("index.number_of_replicas", 0)
.put("index.version.created", Version.CURRENT)
.put("index.xpack.ccr.following_index", true)
.put("index.soft_deletes.enabled", true)
.build();
final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build();
final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);
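The four FollowingEngineTests hunks repeat the same settings block. As a design note, one might factor it into a helper; a hypothetical sketch (not in the diff; the shard count is an assumption, since that line is not visible in the hunks):

----------------------------------------
// Hypothetical helper to build the settings the four hunks above repeat.
static Settings followingEngineIndexSettings() {
    return Settings.builder()
        .put("index.number_of_shards", 1)          // assumption: not shown in the hunks
        .put("index.number_of_replicas", 0)
        .put("index.version.created", Version.CURRENT)
        .put("index.xpack.ccr.following_index", true)
        .put("index.soft_deletes.enabled", true)   // the setting this commit adds everywhere
        .build();
}
----------------------------------------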
@@ -94,6 +94,7 @@ public class FollowStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Fol
final int numberOfQueuedWrites = randomIntBetween(0, Integer.MAX_VALUE);
final long mappingVersion = randomIntBetween(0, Integer.MAX_VALUE);
final long totalFetchTimeMillis = randomLongBetween(0, 4096);
final long totalFetchTookTimeMillis = randomLongBetween(0, 4096);
final long numberOfSuccessfulFetches = randomNonNegativeLong();
final long numberOfFailedFetches = randomLongBetween(0, 8);
final long operationsReceived = randomNonNegativeLong();

@@ -122,6 +123,7 @@ public class FollowStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Fol
numberOfQueuedWrites,
mappingVersion,
totalFetchTimeMillis,
totalFetchTookTimeMillis,
numberOfSuccessfulFetches,
numberOfFailedFetches,
operationsReceived,

@@ -166,6 +168,7 @@ public class FollowStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Fol
+ "\"number_of_queued_writes\":" + numberOfQueuedWrites + ","
+ "\"mapping_version\":" + mappingVersion + ","
+ "\"total_fetch_time_millis\":" + totalFetchTimeMillis + ","
+ "\"total_fetch_leader_time_millis\":" + totalFetchTookTimeMillis + ","
+ "\"number_of_successful_fetches\":" + numberOfSuccessfulFetches + ","
+ "\"number_of_failed_fetches\":" + numberOfFailedFetches + ","
+ "\"operations_received\":" + operationsReceived + ","

@@ -208,6 +211,7 @@ public class FollowStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Fol
1,
1,
100,
50,
10,
0,
10,

@@ -226,7 +230,6 @@ public class FollowStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Fol
Map<String, Object> template =
XContentHelper.convertToMap(XContentType.JSON.xContent(), MonitoringTemplateUtils.loadTemplate("es"), false);
Map<?, ?> followStatsMapping = (Map<?, ?>) XContentMapValues.extractValue("mappings.doc.properties.ccr_stats.properties", template);

assertThat(serializedStatus.size(), equalTo(followStatsMapping.size()));
for (Map.Entry<String, Object> entry : serializedStatus.entrySet()) {
String fieldName = entry.getKey();

@@ -175,6 +175,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i

public static class AutoFollowPattern implements Writeable, ToXContentObject {

public static final ParseField LEADER_CLUSTER_FIELD = new ParseField("leader_cluster");
public static final ParseField LEADER_PATTERNS_FIELD = new ParseField("leader_index_patterns");
public static final ParseField FOLLOW_PATTERN_FIELD = new ParseField("follow_index_pattern");
public static final ParseField MAX_BATCH_OPERATION_COUNT = new ParseField("max_batch_operation_count");

@@ -188,10 +189,12 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<AutoFollowPattern, Void> PARSER =
new ConstructingObjectParser<>("auto_follow_pattern",
args -> new AutoFollowPattern((List<String>) args[0], (String) args[1], (Integer) args[2], (Integer) args[3],
(ByteSizeValue) args[4], (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8]));
args -> new AutoFollowPattern((String) args[0], (List<String>) args[1], (String) args[2], (Integer) args[3],
(Integer) args[4], (ByteSizeValue) args[5], (Integer) args[6], (Integer) args[7], (TimeValue) args[8],
(TimeValue) args[9]));

static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_CLUSTER_FIELD);
PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), LEADER_PATTERNS_FIELD);
PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOW_PATTERN_FIELD);
PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAX_BATCH_OPERATION_COUNT);

@@ -211,6 +214,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
POLL_TIMEOUT, ObjectParser.ValueType.STRING);
}

private final String leaderCluster;
private final List<String> leaderIndexPatterns;
private final String followIndexPattern;
private final Integer maxBatchOperationCount;

@@ -221,7 +225,8 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
private final TimeValue maxRetryDelay;
private final TimeValue pollTimeout;

public AutoFollowPattern(List<String> leaderIndexPatterns,
public AutoFollowPattern(String leaderCluster,
List<String> leaderIndexPatterns,
String followIndexPattern,
Integer maxBatchOperationCount,
Integer maxConcurrentReadBatches,

@@ -230,6 +235,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
Integer maxWriteBufferSize,
TimeValue maxRetryDelay,
TimeValue pollTimeout) {
this.leaderCluster = leaderCluster;
this.leaderIndexPatterns = leaderIndexPatterns;
this.followIndexPattern = followIndexPattern;
this.maxBatchOperationCount = maxBatchOperationCount;

@@ -242,6 +248,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
}

public AutoFollowPattern(StreamInput in) throws IOException {
leaderCluster = in.readString();
leaderIndexPatterns = in.readList(StreamInput::readString);
followIndexPattern = in.readOptionalString();
maxBatchOperationCount = in.readOptionalVInt();

@@ -261,6 +268,10 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
return Regex.simpleMatch(leaderIndexPatterns, indexName);
}

public String getLeaderCluster() {
return leaderCluster;
}

public List<String> getLeaderIndexPatterns() {
return leaderIndexPatterns;
}

@@ -299,6 +310,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(leaderCluster);
out.writeStringList(leaderIndexPatterns);
out.writeOptionalString(followIndexPattern);
out.writeOptionalVInt(maxBatchOperationCount);

@@ -312,6 +324,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(LEADER_CLUSTER_FIELD.getPreferredName(), leaderCluster);
builder.array(LEADER_PATTERNS_FIELD.getPreferredName(), leaderIndexPatterns.toArray(new String[0]));
if (followIndexPattern != null) {
builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), followIndexPattern);

@@ -350,7 +363,8 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AutoFollowPattern that = (AutoFollowPattern) o;
return Objects.equals(leaderIndexPatterns, that.leaderIndexPatterns) &&
return Objects.equals(leaderCluster, that.leaderCluster) &&
Objects.equals(leaderIndexPatterns, that.leaderIndexPatterns) &&
Objects.equals(followIndexPattern, that.followIndexPattern) &&
Objects.equals(maxBatchOperationCount, that.maxBatchOperationCount) &&
Objects.equals(maxConcurrentReadBatches, that.maxConcurrentReadBatches) &&

@@ -364,6 +378,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable<MetaData.Custom> i
@Override
public int hashCode() {
return Objects.hash(
leaderCluster,
leaderIndexPatterns,
followIndexPattern,
maxBatchOperationCount,
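The constructor now takes the leader cluster alias as its first argument, and the parser's constructor-arg indices shift by one accordingly. A minimal illustration of building a pattern with the new signature, leaving all tuning parameters null exactly as the tests above do:

----------------------------------------
AutoFollowPattern pattern = new AutoFollowPattern(
    "eu_cluster",                                   // leaderCluster, the new leading argument
    Collections.singletonList("logs-*"),            // leaderIndexPatterns
    null, null, null, null, null, null, null, null  // followIndexPattern and tuning knobs
);
String alias = pattern.getLeaderCluster();          // "eu_cluster", via the new getter
----------------------------------------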
@@ -48,6 +48,7 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
private static final ParseField NUMBER_OF_QUEUED_WRITES_FIELD = new ParseField("number_of_queued_writes");
private static final ParseField MAPPING_VERSION_FIELD = new ParseField("mapping_version");
private static final ParseField TOTAL_FETCH_TIME_MILLIS_FIELD = new ParseField("total_fetch_time_millis");
private static final ParseField TOTAL_FETCH_LEADER_TIME_MILLIS_FIELD = new ParseField("total_fetch_leader_time_millis");
private static final ParseField NUMBER_OF_SUCCESSFUL_FETCHES_FIELD = new ParseField("number_of_successful_fetches");
private static final ParseField NUMBER_OF_FAILED_FETCHES_FIELD = new ParseField("number_of_failed_fetches");
private static final ParseField OPERATIONS_RECEIVED_FIELD = new ParseField("operations_received");

@@ -87,12 +88,13 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
(long) args[19],
(long) args[20],
(long) args[21],
(long) args[22],
new TreeMap<>(
((List<Map.Entry<Long, Tuple<Integer, ElasticsearchException>>>) args[22])
((List<Map.Entry<Long, Tuple<Integer, ElasticsearchException>>>) args[23])
.stream()
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))),
(long) args[23],
(ElasticsearchException) args[24]));
(long) args[24],
(ElasticsearchException) args[25]));

public static final String FETCH_EXCEPTIONS_ENTRY_PARSER_NAME = "shard-follow-node-task-status-fetch-exceptions-entry";

@@ -116,6 +118,7 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_QUEUED_WRITES_FIELD);
STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAPPING_VERSION_FIELD);
STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_TIME_MILLIS_FIELD);
STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_LEADER_TIME_MILLIS_FIELD);
STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_FETCHES_FIELD);
STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_FETCHES_FIELD);
STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_RECEIVED_FIELD);

@@ -228,6 +231,12 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
return totalFetchTimeMillis;
}

private final long totalFetchLeaderTimeMillis;

public long totalFetchLeaderTimeMillis() {
return totalFetchLeaderTimeMillis;
}

private final long numberOfSuccessfulFetches;

public long numberOfSuccessfulFetches() {

@@ -309,6 +318,7 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
final int numberOfQueuedWrites,
final long mappingVersion,
final long totalFetchTimeMillis,
final long totalFetchLeaderTimeMillis,
final long numberOfSuccessfulFetches,
final long numberOfFailedFetches,
final long operationsReceived,

@@ -334,6 +344,7 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
this.numberOfQueuedWrites = numberOfQueuedWrites;
this.mappingVersion = mappingVersion;
this.totalFetchTimeMillis = totalFetchTimeMillis;
this.totalFetchLeaderTimeMillis = totalFetchLeaderTimeMillis;
this.numberOfSuccessfulFetches = numberOfSuccessfulFetches;
this.numberOfFailedFetches = numberOfFailedFetches;
this.operationsReceived = operationsReceived;

@@ -362,6 +373,7 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
this.numberOfQueuedWrites = in.readVInt();
this.mappingVersion = in.readVLong();
this.totalFetchTimeMillis = in.readVLong();
this.totalFetchLeaderTimeMillis = in.readVLong();
this.numberOfSuccessfulFetches = in.readVLong();
this.numberOfFailedFetches = in.readVLong();
this.operationsReceived = in.readVLong();

@@ -397,6 +409,7 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
out.writeVInt(numberOfQueuedWrites);
out.writeVLong(mappingVersion);
out.writeVLong(totalFetchTimeMillis);
out.writeVLong(totalFetchLeaderTimeMillis);
out.writeVLong(numberOfSuccessfulFetches);
out.writeVLong(numberOfFailedFetches);
out.writeVLong(operationsReceived);

@@ -444,6 +457,10 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
TOTAL_FETCH_TIME_MILLIS_FIELD.getPreferredName(),
"total_fetch_time",
new TimeValue(totalFetchTimeMillis, TimeUnit.MILLISECONDS));
builder.humanReadableField(
TOTAL_FETCH_LEADER_TIME_MILLIS_FIELD.getPreferredName(),
"total_fetch_leader_time",
new TimeValue(totalFetchLeaderTimeMillis, TimeUnit.MILLISECONDS));
builder.field(NUMBER_OF_SUCCESSFUL_FETCHES_FIELD.getPreferredName(), numberOfSuccessfulFetches);
builder.field(NUMBER_OF_FAILED_FETCHES_FIELD.getPreferredName(), numberOfFailedFetches);
builder.field(OPERATIONS_RECEIVED_FIELD.getPreferredName(), operationsReceived);

@@ -516,6 +533,7 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
numberOfQueuedWrites == that.numberOfQueuedWrites &&
mappingVersion == that.mappingVersion &&
totalFetchTimeMillis == that.totalFetchTimeMillis &&
totalFetchLeaderTimeMillis == that.totalFetchLeaderTimeMillis &&
numberOfSuccessfulFetches == that.numberOfSuccessfulFetches &&
numberOfFailedFetches == that.numberOfFailedFetches &&
operationsReceived == that.operationsReceived &&

@@ -552,6 +570,7 @@ public class ShardFollowNodeTaskStatus implements Task.Status {
numberOfQueuedWrites,
mappingVersion,
totalFetchTimeMillis,
totalFetchLeaderTimeMillis,
numberOfSuccessfulFetches,
numberOfFailedFetches,
operationsReceived,
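Because the new stat is inserted in the middle of the serialized form, every site has to add it at the same position: the stream reader and writer, the parser declarations, and equals/hashCode, which is why the constructor-arg indices after the insertion point shift by one (args[22] becomes args[23], and so on). A self-contained sketch of the invariant on a simplified two-field status (the class name and reduced fields are illustrative, not from the diff):

----------------------------------------
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;

// Sketch only: write order and read order must stay aligned field-for-field.
final class StatusSketch implements Writeable {
    final long totalFetchTimeMillis;
    final long totalFetchLeaderTimeMillis; // the newly inserted field

    StatusSketch(StreamInput in) throws IOException {
        totalFetchTimeMillis = in.readVLong();
        totalFetchLeaderTimeMillis = in.readVLong(); // must mirror writeTo exactly
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(totalFetchTimeMillis);
        out.writeVLong(totalFetchLeaderTimeMillis); // same position as in the reader
    }
}
----------------------------------------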
@@ -33,35 +33,35 @@ public class DeleteAutoFollowPatternAction extends Action<AcknowledgedResponse>

public static class Request extends AcknowledgedRequest<Request> {

private String leaderCluster;
private String name;

@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (leaderCluster == null) {
validationException = addValidationError("leaderCluster is missing", validationException);
if (name == null) {
validationException = addValidationError("name is missing", validationException);
}
return validationException;
}

public String getLeaderCluster() {
return leaderCluster;
public String getName() {
return name;
}

public void setLeaderCluster(String leaderCluster) {
this.leaderCluster = leaderCluster;
public void setName(String name) {
this.name = name;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
leaderCluster = in.readString();
name = in.readString();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(leaderCluster);
out.writeString(name);
}

@Override

@@ -69,12 +69,12 @@ public class DeleteAutoFollowPatternAction extends Action<AcknowledgedResponse>
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Objects.equals(leaderCluster, request.leaderCluster);
return Objects.equals(name, request.name);
}

@Override
public int hashCode() {
return Objects.hash(leaderCluster);
return Objects.hash(name);
}
}

@@ -36,14 +36,14 @@ public class GetAutoFollowPatternAction extends Action<GetAutoFollowPatternActio

public static class Request extends MasterNodeReadRequest<Request> {

private String leaderCluster;
private String name;

public Request() {
}

public Request(StreamInput in) throws IOException {
super(in);
this.leaderCluster = in.readOptionalString();
this.name = in.readOptionalString();
}

@Override

@@ -51,18 +51,18 @@ public class GetAutoFollowPatternAction extends Action<GetAutoFollowPatternActio
return null;
}

public String getLeaderCluster() {
return leaderCluster;
public String getName() {
return name;
}

public void setLeaderCluster(String leaderCluster) {
this.leaderCluster = leaderCluster;
public void setName(String name) {
this.name = name;
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(leaderCluster);
out.writeOptionalString(name);
}

@Override

@@ -70,12 +70,12 @@ public class GetAutoFollowPatternAction extends Action<GetAutoFollowPatternActio
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Objects.equals(leaderCluster, request.leaderCluster);
return Objects.equals(name, request.name);
}

@Override
public int hashCode() {
return Objects.hash(leaderCluster);
return Objects.hash(name);
}
}

@@ -46,8 +46,11 @@ public class PutAutoFollowPatternAction extends Action<AcknowledgedResponse> {

private static final ObjectParser<Request, String> PARSER = new ObjectParser<>("put_auto_follow_pattern_request", Request::new);

private static final ParseField NAME_FIELD = new ParseField("name");

static {
PARSER.declareString(Request::setLeaderCluster, LEADER_CLUSTER_FIELD);
PARSER.declareString(Request::setName, NAME_FIELD);
PARSER.declareString(Request::setLeaderCluster, AutoFollowPattern.LEADER_CLUSTER_FIELD);
PARSER.declareStringArray(Request::setLeaderIndexPatterns, AutoFollowPattern.LEADER_PATTERNS_FIELD);
PARSER.declareString(Request::setFollowIndexNamePattern, AutoFollowPattern.FOLLOW_PATTERN_FIELD);
PARSER.declareInt(Request::setMaxBatchOperationCount, AutoFollowPattern.MAX_BATCH_OPERATION_COUNT);

@@ -67,20 +70,21 @@ public class PutAutoFollowPatternAction extends Action<AcknowledgedResponse> {
AutoFollowPattern.POLL_TIMEOUT, ObjectParser.ValueType.STRING);
}

public static Request fromXContent(XContentParser parser, String remoteClusterAlias) throws IOException {
public static Request fromXContent(XContentParser parser, String name) throws IOException {
Request request = PARSER.parse(parser, null);
if (remoteClusterAlias != null) {
if (request.leaderCluster == null) {
request.leaderCluster = remoteClusterAlias;
if (name != null) {
if (request.name == null) {
request.name = name;
} else {
if (request.leaderCluster.equals(remoteClusterAlias) == false) {
throw new IllegalArgumentException("provided leaderCluster is not equal");
if (request.name.equals(name) == false) {
throw new IllegalArgumentException("provided name is not equal");
}
}
}
return request;
}

private String name;
private String leaderCluster;
private List<String> leaderIndexPatterns;
private String followIndexNamePattern;

@@ -96,8 +100,11 @@ public class PutAutoFollowPatternAction extends Action<AcknowledgedResponse> {
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (name == null) {
validationException = addValidationError("[" + NAME_FIELD.getPreferredName() + "] is missing", validationException);
}
if (leaderCluster == null) {
validationException = addValidationError("[" + LEADER_CLUSTER_FIELD.getPreferredName() +
validationException = addValidationError("[" + AutoFollowPattern.LEADER_CLUSTER_FIELD.getPreferredName() +
"] is missing", validationException);
}
if (leaderIndexPatterns == null || leaderIndexPatterns.isEmpty()) {

@@ -120,6 +127,14 @@ public class PutAutoFollowPatternAction extends Action<AcknowledgedResponse> {
return validationException;
}

public String getName() {
return name;
}

public void setName(String name) {
this.name = name;
}

public String getLeaderCluster() {
return leaderCluster;
}

@@ -203,6 +218,7 @@ public class PutAutoFollowPatternAction extends Action<AcknowledgedResponse> {
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
name = in.readString();
leaderCluster = in.readString();
leaderIndexPatterns = in.readList(StreamInput::readString);
followIndexNamePattern = in.readOptionalString();

@@ -218,6 +234,7 @@ public class PutAutoFollowPatternAction extends Action<AcknowledgedResponse> {
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(name);
out.writeString(leaderCluster);
out.writeStringList(leaderIndexPatterns);
out.writeOptionalString(followIndexNamePattern);

@@ -234,7 +251,8 @@ public class PutAutoFollowPatternAction extends Action<AcknowledgedResponse> {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
builder.field(LEADER_CLUSTER_FIELD.getPreferredName(), leaderCluster);
builder.field(NAME_FIELD.getPreferredName(), name);
builder.field(AutoFollowPattern.LEADER_CLUSTER_FIELD.getPreferredName(), leaderCluster);
builder.field(AutoFollowPattern.LEADER_PATTERNS_FIELD.getPreferredName(), leaderIndexPatterns);
if (followIndexNamePattern != null) {
builder.field(AutoFollowPattern.FOLLOW_PATTERN_FIELD.getPreferredName(), followIndexNamePattern);

@@ -270,7 +288,8 @@ public class PutAutoFollowPatternAction extends Action<AcknowledgedResponse> {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Objects.equals(leaderCluster, request.leaderCluster) &&
return Objects.equals(name, request.name) &&
Objects.equals(leaderCluster, request.leaderCluster) &&
Objects.equals(leaderIndexPatterns, request.leaderIndexPatterns) &&
Objects.equals(followIndexNamePattern, request.followIndexNamePattern) &&
Objects.equals(maxBatchOperationCount, request.maxBatchOperationCount) &&

@@ -285,6 +304,7 @@ public class PutAutoFollowPatternAction extends Action<AcknowledgedResponse> {
@Override
public int hashCode() {
return Objects.hash(
name,
leaderCluster,
leaderIndexPatterns,
followIndexNamePattern,
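After this change all three auto-follow requests are addressed by pattern name, and the leader cluster alias becomes an ordinary field of the put request. A minimal sketch of the request wiring, using only setters and fixture values that appear in the hunks above:

----------------------------------------
PutAutoFollowPatternAction.Request put = new PutAutoFollowPatternAction.Request();
put.setName("name1");                              // new: user-chosen pattern name is the key
put.setLeaderCluster("eu_cluster");                // alias is now payload, not the key
put.setLeaderIndexPatterns(Collections.singletonList("logs-*"));

GetAutoFollowPatternAction.Request get = new GetAutoFollowPatternAction.Request();
get.setName("name1");

DeleteAutoFollowPatternAction.Request delete = new DeleteAutoFollowPatternAction.Request();
delete.setName("name1");
----------------------------------------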
@@ -7,22 +7,29 @@ package org.elasticsearch.xpack.core.rollup.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.rollup.RollupField;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

public class DeleteRollupJobAction extends Action<AcknowledgedResponse> {
public class DeleteRollupJobAction extends Action<DeleteRollupJobAction.Response> {

public static final DeleteRollupJobAction INSTANCE = new DeleteRollupJobAction();
public static final String NAME = "cluster:admin/xpack/rollup/delete";

@@ -32,11 +39,11 @@ public class DeleteRollupJobAction extends Action<AcknowledgedResponse> {
}

@Override
public AcknowledgedResponse newResponse() {
return new AcknowledgedResponse();
public Response newResponse() {
return new Response();
}

public static class Request extends AcknowledgedRequest<Request> implements ToXContent {
public static class Request extends BaseTasksRequest<Request> implements ToXContentFragment {
private String id;

public Request(String id) {

@@ -45,6 +52,11 @@ public class DeleteRollupJobAction extends Action<AcknowledgedResponse> {

public Request() {}

@Override
public boolean match(Task task) {
return task.getDescription().equals(RollupField.NAME + "_" + id);
}

public String getId() {
return id;
}

@@ -90,10 +102,74 @@ public class DeleteRollupJobAction extends Action<AcknowledgedResponse> {
}
}

public static class RequestBuilder extends MasterNodeOperationRequestBuilder<Request, AcknowledgedResponse, RequestBuilder> {

public static class RequestBuilder extends ActionRequestBuilder<DeleteRollupJobAction.Request, DeleteRollupJobAction.Response> {
protected RequestBuilder(ElasticsearchClient client, DeleteRollupJobAction action) {
super(client, action, new Request());
super(client, action, new DeleteRollupJobAction.Request());
}
}

public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject {

private boolean acknowledged;

public Response(StreamInput in) throws IOException {
super(Collections.emptyList(), Collections.emptyList());
readFrom(in);
}

public Response(boolean acknowledged, List<TaskOperationFailure> taskFailures, List<FailedNodeException> nodeFailures) {
super(taskFailures, nodeFailures);
this.acknowledged = acknowledged;
}

public Response(boolean acknowledged) {
super(Collections.emptyList(), Collections.emptyList());
this.acknowledged = acknowledged;
}

public Response() {
super(Collections.emptyList(), Collections.emptyList());
this.acknowledged = false;
}

public boolean isDeleted() {
return acknowledged;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
acknowledged = in.readBoolean();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(acknowledged);
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
{
toXContentCommon(builder, params);
builder.field("acknowledged", acknowledged);
}
builder.endObject();
return builder;
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DeleteRollupJobAction.Response response = (DeleteRollupJobAction.Response) o;
return super.equals(o) && acknowledged == response.acknowledged;
}

@Override
public int hashCode() {
return Objects.hash(super.hashCode(), acknowledged);
}
}
}

@@ -971,6 +971,9 @@
"total_fetch_time_millis": {
"type": "long"
},
"total_fetch_leader_time_millis": {
"type": "long"
},
"number_of_successful_fetches": {
"type": "long"
},
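Callers that previously received a bare AcknowledgedResponse now get a task-level response that can also carry per-task and per-node failures alongside the acknowledged flag. A hedged sketch of consuming it; client is assumed to be an ElasticsearchClient, and "my-job" is an illustrative job id, not from the diff:

----------------------------------------
client.execute(DeleteRollupJobAction.INSTANCE, new DeleteRollupJobAction.Request("my-job"),
    ActionListener.wrap(
        response -> {
            // isDeleted() reflects the acknowledged flag serialized above
            if (response.isDeleted() == false) {
                // task/node failures are carried by the BaseTasksResponse superclass
            }
        },
        e -> { /* e.g. ResourceNotFoundException when the task no longer exists */ }));
----------------------------------------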
@ -5,103 +5,101 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.rollup.action;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ResourceNotFoundException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionListenerResponseHandler;
|
||||
import org.elasticsearch.action.FailedNodeException;
|
||||
import org.elasticsearch.action.TaskOperationFailure;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
|
||||
import org.elasticsearch.action.support.tasks.TransportTasksAction;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockException;
|
||||
import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.discovery.MasterNotDiscoveredException;
|
||||
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
|
||||
import org.elasticsearch.persistent.PersistentTasksService;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.xpack.core.indexing.IndexerState;
|
||||
import org.elasticsearch.xpack.core.rollup.action.DeleteRollupJobAction;
|
||||
import org.elasticsearch.xpack.core.rollup.job.RollupJob;
|
||||
import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus;
|
||||
import org.elasticsearch.xpack.rollup.job.RollupJobTask;
|
||||
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
public class TransportDeleteRollupJobAction
|
||||
extends TransportMasterNodeAction<DeleteRollupJobAction.Request, AcknowledgedResponse> {
|
||||
|
||||
private final PersistentTasksService persistentTasksService;
|
||||
public class TransportDeleteRollupJobAction extends TransportTasksAction<RollupJobTask, DeleteRollupJobAction.Request,
|
||||
DeleteRollupJobAction.Response, DeleteRollupJobAction.Response> {
 
     @Inject
-    public TransportDeleteRollupJobAction(Settings settings, TransportService transportService, ThreadPool threadPool,
-                                          ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
-                                          PersistentTasksService persistentTasksService, ClusterService clusterService) {
-        super(settings, DeleteRollupJobAction.NAME, transportService, clusterService, threadPool, actionFilters,
-                indexNameExpressionResolver, DeleteRollupJobAction.Request::new);
-        this.persistentTasksService = persistentTasksService;
+    public TransportDeleteRollupJobAction(Settings settings, TransportService transportService,
+                                          ActionFilters actionFilters, ClusterService clusterService) {
+        super(settings, DeleteRollupJobAction.NAME, clusterService, transportService, actionFilters,
+            DeleteRollupJobAction.Request::new, DeleteRollupJobAction.Response::new, ThreadPool.Names.SAME);
     }
 
     @Override
-    protected String executor() {
-        return ThreadPool.Names.SAME;
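+    // A rollup job is an allocated persistent task, so a delete request has to reach the
+    // node hosting the live task; this override first routes every request through the
+    // elected master, which holds the authoritative list of persistent tasks.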
+    protected void doExecute(Task task, DeleteRollupJobAction.Request request, ActionListener<DeleteRollupJobAction.Response> listener) {
+        final ClusterState state = clusterService.state();
+        final DiscoveryNodes nodes = state.nodes();
+
+        if (nodes.isLocalNodeElectedMaster()) {
+            PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
+            if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null) {
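+                // the job still exists in the cluster state, so hand the request to the
+                // TransportTasksAction machinery, which fans it out to the node that is
+                // actually running the allocated rollup task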
+                super.doExecute(task, request, listener);
+            } else {
+                // If we couldn't find the job in the persistent task CS, it means it was deleted prior to this call,
+                // no need to go looking for the allocated task
+                listener.onFailure(new ResourceNotFoundException("the task with id [" + request.getId() + "] doesn't exist"));
+            }
+
+        } else {
+            // Delegate the delete request to the elected master node, so it becomes the coordinating node.
+            // Non-master nodes may have a stale cluster state that shows jobs which are cancelled
+            // on the master, which makes testing difficult.
+            if (nodes.getMasterNode() == null) {
+                listener.onFailure(new MasterNotDiscoveredException("no known master nodes"));
+            } else {
+                transportService.sendRequest(nodes.getMasterNode(), actionName, request,
+                    new ActionListenerResponseHandler<>(listener, DeleteRollupJobAction.Response::new));
+            }
+        }
     }
 
     @Override
-    protected AcknowledgedResponse newResponse() {
-        return new AcknowledgedResponse();
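+    // Runs on the node that hosts the allocated rollup task. The job must already be
+    // STOPPED; a still-running job is rejected rather than racing a live indexer.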
+    protected void taskOperation(DeleteRollupJobAction.Request request, RollupJobTask jobTask,
+                                 ActionListener<DeleteRollupJobAction.Response> listener) {
+
+        assert jobTask.getConfig().getId().equals(request.getId());
+        IndexerState state = ((RollupJobStatus) jobTask.getStatus()).getIndexerState();
+        if (state.equals(IndexerState.STOPPED)) {
+            jobTask.onCancelled();
+            listener.onResponse(new DeleteRollupJobAction.Response(true));
+        } else {
+            listener.onFailure(new IllegalStateException("Could not delete job [" + request.getId() + "] because " +
+                "indexer state is [" + state + "]. Job must be [" + IndexerState.STOPPED + "] before deletion."));
+        }
     }
 
     @Override
-    protected void masterOperation(DeleteRollupJobAction.Request request, ClusterState state,
-                                   ActionListener<AcknowledgedResponse> listener) throws Exception {
-
-        String jobId = request.getId();
-        TimeValue timeout = new TimeValue(60, TimeUnit.SECONDS); // TODO make this a config option
-
-        // Step 1. Cancel the persistent task
-        persistentTasksService.sendRemoveRequest(jobId, new ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>>() {
-            @Override
-            public void onResponse(PersistentTasksCustomMetaData.PersistentTask<?> persistentTask) {
-                logger.debug("Request to cancel Task for Rollup job [" + jobId + "] successful.");
-
-                // Step 2. Wait for the task to finish cancellation internally
-                persistentTasksService.waitForPersistentTaskCondition(jobId, Objects::isNull, timeout,
-                    new PersistentTasksService.WaitForPersistentTaskListener<RollupJob>() {
-                        @Override
-                        public void onResponse(PersistentTasksCustomMetaData.PersistentTask<RollupJob> task) {
-                            logger.debug("Task for Rollup job [" + jobId + "] successfully canceled.");
-                            listener.onResponse(new AcknowledgedResponse(true));
+    protected DeleteRollupJobAction.Response newResponse(DeleteRollupJobAction.Request request, List<DeleteRollupJobAction.Response> tasks,
+                                                         List<TaskOperationFailure> taskOperationFailures,
+                                                         List<FailedNodeException> failedNodeExceptions) {
+        // There should theoretically only be one task running the rollup job.
+        // If there are more, in production that is fine as long as they all acknowledge shutting down,
+        // but in testing we'd like to know when there was more than one, hence the assert.
+        assert tasks.size() + taskOperationFailures.size() == 1;
+        boolean cancelled = tasks.size() > 0 && tasks.stream().allMatch(DeleteRollupJobAction.Response::isDeleted);
+        return new DeleteRollupJobAction.Response(cancelled, taskOperationFailures, failedNodeExceptions);
     }
 
     @Override
-                        public void onFailure(Exception e) {
-                            logger.error("Error while cancelling task for Rollup job [" + jobId
-                                + "]." + e);
-                            listener.onFailure(e);
-                        }
-
-                        @Override
-                        public void onTimeout(TimeValue timeout) {
-                            String msg = "Stopping of Rollup job [" + jobId + "] timed out after [" + timeout + "].";
-                            logger.warn(msg);
-                            listener.onFailure(new ElasticsearchException(msg));
-                        }
-                    });
-            }
-
-            @Override
-            public void onFailure(Exception e) {
-                logger.error("Error while requesting to cancel task for Rollup job [" + jobId
-                    + "]" + e);
-                listener.onFailure(e);
-            }
-        });
-
-    }
-
-    @Override
-    protected ClusterBlockException checkBlock(DeleteRollupJobAction.Request request, ClusterState state) {
-        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
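+    // Deserialize the per-task response streamed back from the node that executed
+    // taskOperation, so the coordinating node can aggregate it in newResponse above.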
+    protected DeleteRollupJobAction.Response readTaskResponse(StreamInput in) throws IOException {
+        DeleteRollupJobAction.Response response = new DeleteRollupJobAction.Response();
+        response.readFrom(in);
+        return response;
     }
 }
@@ -349,7 +349,7 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE
      * shut down from the inside.
      */
     @Override
-    protected synchronized void onCancelled() {
+    public synchronized void onCancelled() {
         logger.info("Received cancellation request for Rollup job [" + job.getConfig().getId() + "], state: [" + indexer.getState() + "]");
         if (indexer.abort()) {
             // there is no background job running, we can shutdown safely
@@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.action.RestToXContentListener;
 import org.elasticsearch.xpack.core.rollup.action.DeleteRollupJobAction;
 import org.elasticsearch.xpack.rollup.Rollup;
@@ -31,7 +32,16 @@ public class RestDeleteRollupJobAction extends BaseRestHandler {
         String id = restRequest.param(ID.getPreferredName());
         DeleteRollupJobAction.Request request = new DeleteRollupJobAction.Request(id);
 
-        return channel -> client.execute(DeleteRollupJobAction.INSTANCE, request, new RestToXContentListener<>(channel));
+        return channel -> client.execute(DeleteRollupJobAction.INSTANCE, request,
+            new RestToXContentListener<DeleteRollupJobAction.Response>(channel) {
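+                // report task or node failures as a 500 instead of the listener's
+                // default 200, since the delete did not fully take effect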
+                @Override
+                protected RestStatus getStatus(DeleteRollupJobAction.Response response) {
+                    if (response.getNodeFailures().size() > 0 || response.getTaskFailures().size() > 0) {
+                        return RestStatus.INTERNAL_SERVER_ERROR;
+                    }
+                    return RestStatus.OK;
+                }
+            });
     }
 
     @Override
@@ -1,4 +1,4 @@
-import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask
+import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
 
 apply plugin: 'elasticsearch.build'
 
@@ -26,7 +26,7 @@ if (project.inFipsJvm) {
     test.enabled = false
     // Forbidden APIs non-portable checks fail because Bouncy Castle classes are used from the FIPS JDK, and those are
     // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS.
-    tasks.withType(ForbiddenApisCliTask) {
+    tasks.withType(CheckForbiddenApis) {
         bundledSignatures -= "jdk-non-portable"
     }
     // The FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit,
@@ -46,9 +46,9 @@ import static org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResol
 
 class IndicesAndAliasesResolver {
 
-    //`*,-*` what we replace indices with if we need Elasticsearch to return empty responses without throwing exception
-    private static final String[] NO_INDICES_ARRAY = new String[] { "*", "-*" };
-    static final List<String> NO_INDICES_LIST = Arrays.asList(NO_INDICES_ARRAY);
+    //`*,-*` what we replace indices and aliases with if we need Elasticsearch to return empty responses without throwing exception
+    static final String[] NO_INDICES_OR_ALIASES_ARRAY = new String[] { "*", "-*" };
+    static final List<String> NO_INDICES_OR_ALIASES_LIST = Arrays.asList(NO_INDICES_OR_ALIASES_ARRAY);
 
     private final IndexNameExpressionResolver nameExpressionResolver;
     private final RemoteClusterResolver remoteClusterResolver;
@@ -165,7 +165,7 @@
                 //this is how we tell es core to return an empty response, we can let the request through being sure
                 //that the '-*' wildcard expression will be resolved to no indices. We can't let empty indices through
                 //as that would be resolved to _all by es core.
-                replaceable.indices(NO_INDICES_ARRAY);
+                replaceable.indices(NO_INDICES_OR_ALIASES_ARRAY);
                 indicesReplacedWithNoIndices = true;
                 resolvedIndicesBuilder.addLocal(NO_INDEX_PLACEHOLDER);
             } else {
@@ -176,8 +176,6 @@
             }
         } else {
             if (containsWildcards(indicesRequest)) {
-                //an alias can still contain '*' in its name as of 5.0. Such aliases cannot be referred to when using
-                //the security plugin, otherwise the following exception gets thrown
                 throw new IllegalStateException("There are no external requests known to support wildcards that don't support replacing " +
                     "their indices");
             }
@@ -198,8 +196,6 @@
         if (aliasesRequest.expandAliasesWildcards()) {
             List<String> aliases = replaceWildcardsWithAuthorizedAliases(aliasesRequest.aliases(),
                 loadAuthorizedAliases(authorizedIndices.get(), metaData));
-            //it may be that we replace aliases with an empty array, in case there are no authorized aliases for the action.
-            //MetaData#findAliases will return nothing when some alias was originally requested, which was replaced with empty.
             aliasesRequest.replaceAliases(aliases.toArray(new String[aliases.size()]));
         }
         if (indicesReplacedWithNoIndices) {
@@ -213,6 +209,13 @@
         } else {
             resolvedIndicesBuilder.addLocal(aliasesRequest.aliases());
         }
+        // if no aliases are authorized, then fill in an expression that
+        // MetaData#findAliases evaluates to the empty alias list. You cannot put
+        // "nothing" (the empty list) explicitly because this is resolved by es core to
+        // _all
+        if (aliasesRequest.aliases().length == 0) {
+            aliasesRequest.replaceAliases(NO_INDICES_OR_ALIASES_ARRAY);
+        }
     }
     return resolvedIndicesBuilder.build();
 }
@@ -818,7 +818,7 @@ public class AuthorizationServiceTests extends ESTestCase {
         final SearchRequest searchRequest = new SearchRequest("_all");
         authorize(authentication, SearchAction.NAME, searchRequest);
         assertEquals(2, searchRequest.indices().length);
-        assertEquals(IndicesAndAliasesResolver.NO_INDICES_LIST, Arrays.asList(searchRequest.indices()));
+        assertEquals(IndicesAndAliasesResolver.NO_INDICES_OR_ALIASES_LIST, Arrays.asList(searchRequest.indices()));
     }
 
     public void testGrantedNonXPackUserCanExecuteMonitoringOperationsAgainstSecurityIndex() {