diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 9918d54d707..dce14b10fcb 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -17,12 +17,22 @@ * under the License. */ import java.nio.file.Files +import org.gradle.util.GradleVersion plugins { id 'java-gradle-plugin' id 'groovy' } +gradlePlugin { + plugins { + simplePlugin { + id = 'elasticsearch.clusterformation' + implementationClass = 'org.elasticsearch.gradle.clusterformation.ClusterformationPlugin' + } + } +} + group = 'org.elasticsearch.gradle' String minimumGradleVersion = file('src/main/resources/minimumGradleVersion').text.trim() @@ -166,7 +176,6 @@ if (project != rootProject) { it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'} } exclude "**/*Tests.class" - include "**/*IT.class" testClassesDirs = sourceSets.test.output.classesDirs classpath = sourceSets.test.runtimeClasspath inputs.dir(file("src/testKit")) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy index 4299efd95a3..119a0276499 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy @@ -22,8 +22,8 @@ package org.elasticsearch.gradle.precommit import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.elasticsearch.gradle.LoggedExec import org.gradle.api.file.FileCollection +import org.gradle.api.tasks.Classpath import org.gradle.api.tasks.OutputFile - /** * Runs CheckJarHell on a classpath. */ @@ -35,9 +35,13 @@ public class JarHellTask extends LoggedExec { * inputs (ie the jars/class files). */ @OutputFile - File successMarker = new File(project.buildDir, 'markers/jarHell') + File successMarker + + @Classpath + FileCollection classpath public JarHellTask() { + successMarker = new File(project.buildDir, 'markers/jarHell-' + getName()) project.afterEvaluate { FileCollection classpath = project.sourceSets.test.runtimeClasspath if (project.plugins.hasPlugin(ShadowPlugin)) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 60469622484..be7561853bb 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -31,7 +31,7 @@ class PrecommitTasks { /** Adds a precommit task, which depends on non-test verification tasks. 
*/ public static Task create(Project project, boolean includeDependencyLicenses) { - Configuration forbiddenApisConfiguration = project.configurations.create("forbiddenApisCliJar") + project.configurations.create("forbiddenApisCliJar") project.dependencies { forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.5') } @@ -43,7 +43,7 @@ class PrecommitTasks { project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class), project.tasks.create('licenseHeaders', LicenseHeadersTask.class), project.tasks.create('filepermissions', FilePermissionsTask.class), - project.tasks.create('jarHell', JarHellTask.class), + configureJarHell(project), configureThirdPartyAudit(project) ] @@ -80,6 +80,12 @@ class PrecommitTasks { return project.tasks.create(precommitOptions) } + private static Task configureJarHell(Project project) { + Task task = project.tasks.create('jarHell', JarHellTask.class) + task.classpath = project.sourceSets.test.runtimeClasspath + return task + } + private static Task configureThirdPartyAudit(Project project) { ThirdPartyAuditTask thirdPartyAuditTask = project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class) ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources') diff --git a/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java new file mode 100644 index 00000000000..6d256ba0449 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch; + +import org.gradle.api.Action; +import org.gradle.api.Project; +import org.gradle.api.file.CopySpec; +import org.gradle.api.file.FileTree; +import org.gradle.api.tasks.WorkResult; +import org.gradle.process.ExecResult; +import org.gradle.process.JavaExecSpec; + +import java.io.File; + +/** + * Facilitate access to Gradle services without a direct dependency on Project. + * + * In a future release, Gradle will offer service injection; this adapter plays that role until then. + * It exposes the service methods that are part of the public API, as the classes implementing them are not. + * Today service injection is not available for + * extensions. + * + * Everything exposed here must be thread safe. That is the very reason why project is not passed in directly.
+ */ +public class GradleServicesAdapter { + + public final Project project; + + public GradleServicesAdapter(Project project) { + this.project = project; + } + + public static GradleServicesAdapter getInstance(Project project) { + return new GradleServicesAdapter(project); + } + + public WorkResult copy(Action<? super CopySpec> action) { + return project.copy(action); + } + + public WorkResult sync(Action<? super CopySpec> action) { + return project.sync(action); + } + + public ExecResult javaexec(Action<? super JavaExecSpec> action) { + return project.javaexec(action); + } + + public FileTree zipTree(File zipPath) { + return project.zipTree(zipPath); + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java new file mode 100644 index 00000000000..c926e70b3f7 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle; + +public enum Distribution { + + INTEG_TEST("integ-test-zip"), + ZIP("zip"), + ZIP_OSS("zip-oss"); + + private final String name; + + Distribution(String name) { + this.name = name; + } + + public String getName() { + return name; + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java new file mode 100644 index 00000000000..779e7b61ed9 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ClusterformationPlugin.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.gradle.clusterformation; + +import groovy.lang.Closure; +import org.elasticsearch.GradleServicesAdapter; +import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.Task; +import org.gradle.api.execution.TaskActionListener; +import org.gradle.api.execution.TaskExecutionListener; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.plugins.ExtraPropertiesExtension; +import org.gradle.api.tasks.TaskState; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ClusterformationPlugin implements Plugin<Project> { + + public static final String LIST_TASK_NAME = "listElasticSearchClusters"; + public static final String EXTENSION_NAME = "elasticSearchClusters"; + + private final Logger logger = Logging.getLogger(ClusterformationPlugin.class); + + @Override + public void apply(Project project) { + NamedDomainObjectContainer<ElasticsearchNode> container = project.container( + ElasticsearchNode.class, + (name) -> new ElasticsearchNode(name, GradleServicesAdapter.getInstance(project)) + ); + project.getExtensions().add(EXTENSION_NAME, container); + + Task listTask = project.getTasks().create(LIST_TASK_NAME); + listTask.setGroup("ES cluster formation"); + listTask.setDescription("Lists all ES clusters configured for this project"); + listTask.doLast((Task task) -> + container.forEach((ElasticsearchConfiguration cluster) -> + logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getDistribution()) + ) + ); + + Map<Task, List<ElasticsearchConfiguration>> taskToCluster = new HashMap<>(); + + // register an extension for all current and future tasks, so that any task can declare that it wants to use a + // specific cluster. + project.getTasks().all((Task task) -> + task.getExtensions().findByType(ExtraPropertiesExtension.class) + .set( + "useCluster", + new Closure<Void>(this, this) { + public void doCall(ElasticsearchConfiguration conf) { + taskToCluster.computeIfAbsent(task, k -> new ArrayList<>()).add(conf); + } + }) + ); + + project.getGradle().getTaskGraph().whenReady(taskExecutionGraph -> + taskExecutionGraph.getAllTasks() + .forEach(task -> + taskToCluster.getOrDefault(task, Collections.emptyList()).forEach(ElasticsearchConfiguration::claim) + ) + ); + project.getGradle().addListener( + new TaskActionListener() { + @Override + public void beforeActions(Task task) { + // we only start the cluster before the actions, so we'll not start it if the task is up-to-date + taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::start); + } + @Override + public void afterActions(Task task) {} + } + ); + project.getGradle().addListener( + new TaskExecutionListener() { + @Override + public void afterExecute(Task task, TaskState state) { + // always un-claim the cluster, even if _this_ task is up-to-date, as others might not have been and caused the + // cluster to start.
+ taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::unClaimAndStop); + } + @Override + public void beforeExecute(Task task) {} + } + ); + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java new file mode 100644 index 00000000000..913d88e9fa1 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchConfiguration.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.Version; + +import java.util.concurrent.Future; + +public interface ElasticsearchConfiguration { + String getName(); + + Version getVersion(); + + void setVersion(Version version); + + default void setVersion(String version) { + setVersion(Version.fromString(version)); + } + + Distribution getDistribution(); + + void setDistribution(Distribution distribution); + + void claim(); + + Future<Void> start(); + + void unClaimAndStop(); +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java new file mode 100644 index 00000000000..8b78fc2b627 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/clusterformation/ElasticsearchNode.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.GradleServicesAdapter; +import org.elasticsearch.gradle.Distribution; +import org.elasticsearch.gradle.Version; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; + +import java.util.Objects; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +public class ElasticsearchNode implements ElasticsearchConfiguration { + + private final String name; + private final GradleServicesAdapter services; + private final AtomicInteger noOfClaims = new AtomicInteger(); + private final AtomicBoolean started = new AtomicBoolean(false); + private final Logger logger = Logging.getLogger(ElasticsearchNode.class); + + private Distribution distribution; + private Version version; + + public ElasticsearchNode(String name, GradleServicesAdapter services) { + this.name = name; + this.services = services; + } + + @Override + public String getName() { + return name; + } + + @Override + public Version getVersion() { + return version; + } + + @Override + public void setVersion(Version version) { + checkNotRunning(); + this.version = version; + } + + @Override + public Distribution getDistribution() { + return distribution; + } + + @Override + public void setDistribution(Distribution distribution) { + checkNotRunning(); + this.distribution = distribution; + } + + @Override + public void claim() { + noOfClaims.incrementAndGet(); + } + + /** + * Start the cluster if not running. Does nothing if the cluster is already running. + * + * @return future of thread running in the background + */ + @Override + public Future<Void> start() { + if (started.getAndSet(true)) { + logger.lifecycle("Already started cluster: {}", name); + } else { + logger.lifecycle("Starting cluster: {}", name); + } + return null; + } + + /** + * Stops a running cluster if it's not claimed. Does nothing otherwise.
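+ * The claim counter is decremented first; the cluster is stopped only once the number of claims reaches zero and the cluster was actually started.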
+ */ + @Override + public void unClaimAndStop() { + int decrementedClaims = noOfClaims.decrementAndGet(); + if (decrementedClaims > 0) { + logger.lifecycle("Not stopping {}, since cluster still has {} claim(s)", name, decrementedClaims); + return; + } + if (started.get() == false) { + logger.lifecycle("Asked to unClaimAndStop, but cluster was not running: {}", name); + return; + } + logger.lifecycle("Stopping {}, number of claims is {}", name, decrementedClaims); + } + + private void checkNotRunning() { + if (started.get()) { + throw new IllegalStateException("Configuration cannot be altered while running"); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ElasticsearchNode that = (ElasticsearchNode) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } +} diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index 899dd4f5927..9add3349f9e 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -4.9 \ No newline at end of file +4.10 \ No newline at end of file diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index 3e18b0b80af..aca99067011 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -153,17 +153,4 @@ public class BuildExamplePluginsIT extends GradleIntegrationTestCase { } } - private String getLocalTestRepoPath() { - String property = System.getProperty("test.local-test-repo-path"); - Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests"); - File file = new File(property); - assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); - if (File.separator.equals("\\")) { - // Use / on Windows too, the build script is not happy with \ - return file.getAbsolutePath().replace(File.separator, "/"); - } else { - return file.getAbsolutePath(); - } - } - } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java index 98fea2ea15a..99afd0bcbe0 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java @@ -40,7 +40,7 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe .withArguments("buildResources", "-s", "-i") .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":buildResources"); + assertTaskSuccessful(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); @@ -61,8 +61,8 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":buildResources"); - assertTaskSuccessfull(result, ":sampleCopyAll"); + assertTaskSuccessful(result, ":buildResources"); + assertTaskSuccessful(result, ":sampleCopyAll"); assertBuildFileExists(result, PROJECT_NAME,
"sampleCopyAll/checkstyle.xml"); // This is a side effect of compile time reference assertBuildFileExists(result, PROJECT_NAME, "sampleCopyAll/checkstyle_suppressions.xml"); @@ -75,7 +75,7 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe .withPluginClasspath() .build(); - assertTaskSuccessfull(result, ":sample"); + assertTaskSuccessful(result, ":sample"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java new file mode 100644 index 00000000000..c690557537d --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/clusterformation/ClusterformationPluginIT.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle.clusterformation; + +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; +import org.gradle.testkit.runner.TaskOutcome; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +public class ClusterformationPluginIT extends GradleIntegrationTestCase { + + public void testListClusters() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("listElasticSearchClusters", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":listElasticSearchClusters").getOutcome()); + assertOutputContains( + result.getOutput(), + " * myTestCluster:" + ); + + } + + public void testUseClusterByOne() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertOutputContains( + result.getOutput(), + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByOneWithDryRun() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "-s", "--dry-run") + .withPluginClasspath() + .build(); + + assertNull(result.task(":user1")); + assertOutputDoesNotContain( + result.getOutput(), + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByTwo() { + BuildResult result = GradleRunner.create() + 
.withProjectDir(getProjectDir("clusterformation")) + .withArguments("user1", "user2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertEquals(TaskOutcome.SUCCESS, result.task(":user2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Starting cluster: myTestCluster", + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "Stopping myTestCluster, number of claims is 0" + ); + } + + public void testUseClusterByUpToDateTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("upToDate1", "upToDate2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate1").getOutcome()); + assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "cluster was not running: myTestCluster" + ); + assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); + } + + public void testUseClusterBySkippedTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("skipped1", "skipped2", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped2").getOutcome()); + assertOutputContains( + result.getOutput(), + "Not stopping myTestCluster, since cluster still has 1 claim(s)", + "cluster was not running: myTestCluster" + ); + assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); + } + + public void tetUseClusterBySkippedAndWorkingTask() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("clusterformation")) + .withArguments("skipped1", "user1", "-s") + .withPluginClasspath() + .build(); + + assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); + assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); + assertOutputContains( + result.getOutput(), + "> Task :user1", + "Starting cluster: myTestCluster", + "Stopping myTestCluster, number of claims is 0" + ); + } + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java new file mode 100644 index 00000000000..03f2022bc66 --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java @@ -0,0 +1,42 @@ +package org.elasticsearch.gradle.precommit; + +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; +import org.gradle.testkit.runner.GradleRunner; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +public class JarHellTaskIT extends GradleIntegrationTestCase { + + public void testJarHellDetected() { + BuildResult result = GradleRunner.create() + .withProjectDir(getProjectDir("jarHell")) + .withArguments("clean", "precommit", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) + .withPluginClasspath() + .buildAndFail(); + + assertTaskFailed(result, ":jarHell"); + assertOutputContains( + result.getOutput(), + "Exception in thread \"main\" java.lang.IllegalStateException: jar hell!", + "class: org.apache.logging.log4j.Logger" + ); + } + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index f00ab406a6c..a1d4b86ab76 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -9,6 +9,7 @@ import java.io.File; import java.nio.file.Files; import java.nio.file.Path; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -66,15 +67,24 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { } } - protected void assertTaskSuccessfull(BuildResult result, String taskName) { + protected void assertTaskFailed(BuildResult result, String taskName) { + assertTaskOutcome(result, taskName, TaskOutcome.FAILED); + } + + protected void assertTaskSuccessful(BuildResult result, String taskName) { + assertTaskOutcome(result, taskName, TaskOutcome.SUCCESS); + } + + private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome taskOutcome) { BuildTask task = result.task(taskName); if (task == null) { - fail("Expected task `" + taskName + "` to be successful, but it did not run"); + fail("Expected task `" + taskName + "` to be " + taskOutcome + ", but it did not run" + + "\n\nOutput is:\n" + result.getOutput()); } assertEquals( - "Expected task to be successful but it was: " + task.getOutcome() + - "\n\nOutput is:\n" + result.getOutput() , - TaskOutcome.SUCCESS, + "Expected task to be " + taskOutcome + " but it was: " + task.getOutcome() + + "\n\nOutput is:\n" + result.getOutput(), + taskOutcome, task.getOutcome() ); } @@ -109,4 +119,17 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { Files.exists(absPath) ); } + + protected String getLocalTestRepoPath() { + String property = System.getProperty("test.local-test-repo-path"); + Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests"); + File file = new File(property); + assertTrue("Expected " + property + " to exist, but it did not!", file.exists()); + if (File.separator.equals("\\")) { + // Use / on Windows too, the build script is not happy with \ + return file.getAbsolutePath().replace(File.separator, "/"); + } else { + return file.getAbsolutePath(); + } + } } diff --git a/buildSrc/src/testKit/clusterformation/build.gradle b/buildSrc/src/testKit/clusterformation/build.gradle new file mode 100644 index 00000000000..ae9dd8a2c33 --- /dev/null +++
b/buildSrc/src/testKit/clusterformation/build.gradle @@ -0,0 +1,41 @@ +plugins { + id 'elasticsearch.clusterformation' +} + +elasticSearchClusters { + myTestCluster { + distribution = 'ZIP' + } +} + +task user1 { + useCluster elasticSearchClusters.myTestCluster + doLast { + println "user1 executing" + } +} + +task user2 { + useCluster elasticSearchClusters.myTestCluster + doLast { + println "user2 executing" + } +} + +task upToDate1 { + useCluster elasticSearchClusters.myTestCluster +} + +task upToDate2 { + useCluster elasticSearchClusters.myTestCluster +} + +task skipped1 { + enabled = false + useCluster elasticSearchClusters.myTestCluster +} + +task skipped2 { + enabled = false + useCluster elasticSearchClusters.myTestCluster +} diff --git a/buildSrc/src/testKit/jarHell/build.gradle b/buildSrc/src/testKit/jarHell/build.gradle new file mode 100644 index 00000000000..17ff43fc740 --- /dev/null +++ b/buildSrc/src/testKit/jarHell/build.gradle @@ -0,0 +1,27 @@ +plugins { + id 'java' + id 'elasticsearch.build' +} + +dependencyLicenses.enabled = false +dependenciesInfo.enabled = false +forbiddenApisMain.enabled = false +forbiddenApisTest.enabled = false +thirdPartyAudit.enabled = false +namingConventions.enabled = false +ext.licenseFile = file("$buildDir/dummy/license") +ext.noticeFile = file("$buildDir/dummy/notice") + +repositories { + mavenCentral() + maven { + url System.getProperty("local.repo.path") + } +} + +dependencies { + // Needed for the JarHell task + testCompile ("org.elasticsearch.test:framework:${versions.elasticsearch}") + // causes jar hell with local sources + compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" +} diff --git a/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java b/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java new file mode 100644 index 00000000000..a4332c664fa --- /dev/null +++ b/buildSrc/src/testKit/jarHell/src/main/java/org/apache/logging/log4j/Logger.java @@ -0,0 +1,7 @@ +package org.apache.logging.log4j; + +// Jar Hell !
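+// This stub deliberately reuses the fully-qualified name of the log4j-api Logger class pulled in as a compile dependency above, so the test runtime classpath ends up with two classes of the same name and the jarHell precommit task is expected to fail, as JarHellTaskIT asserts.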
+public class Logger { + +} + diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index e114a868dda..e68bd266843 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -90,6 +90,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; @@ -1150,10 +1151,10 @@ final class RequestConverters { static Request xPackGraphExplore(GraphExploreRequest exploreRequest) throws IOException { String endpoint = endpoint(exploreRequest.indices(), exploreRequest.types(), "_xpack/graph/_explore"); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - request.setEntity(createEntity(exploreRequest, REQUEST_BODY_CONTENT_TYPE)); + request.setEntity(createEntity(exploreRequest, REQUEST_BODY_CONTENT_TYPE)); return request; - } - + } + static Request xPackWatcherPutWatch(PutWatchRequest putWatchRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") @@ -1436,11 +1437,16 @@ final class RequestConverters { Params withRefresh(boolean refresh) { if (refresh) { - return withRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + return withRefreshPolicy(RefreshPolicy.IMMEDIATE); } return this; } + /** + * @deprecated If creating a new HLRC REST API call, use {@link RefreshPolicy} + * instead of {@link WriteRequest.RefreshPolicy} from the server project + */ + @Deprecated Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) { if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) { return putParam("refresh", refreshPolicy.getValue()); @@ -1448,6 +1454,13 @@ final class RequestConverters { return this; } + Params withRefreshPolicy(RefreshPolicy refreshPolicy) { + if (refreshPolicy != RefreshPolicy.NONE) { + return putParam("refresh", refreshPolicy.getValue()); + } + return this; + } + Params withRetryOnConflict(int retryOnConflict) { if (retryOnConflict > 0) { return putParam("retry_on_conflict", String.valueOf(retryOnConflict)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java new file mode 100644 index 00000000000..8b72f704edf --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/RefreshPolicy.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +/** + * Enumeration of values that control the refresh policy for a request that + * supports specifying a refresh policy. + */ +public enum RefreshPolicy { + + /** + * Don't refresh after this request. The default. + */ + NONE("false"), + /** + * Force a refresh as part of this request. This refresh policy does not scale for high indexing or search throughput but is useful + * to present a consistent view for indices with very low traffic. And it is wonderful for tests! + */ + IMMEDIATE("true"), + /** + * Leave this request open until a refresh has made the contents of this request visible to search. This refresh policy is + * compatible with high indexing and search throughput but it causes the request to wait to reply until a refresh occurs. + */ + WAIT_UNTIL("wait_for"); + + private final String value; + + RefreshPolicy(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + /** + * Get the default refresh policy, which is NONE + */ + public static RefreshPolicy getDefault() { + return RefreshPolicy.NONE; + } +} diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index c47786299bc..38be8db42ff 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -39,3 +39,9 @@ test { // TODO: find a way to add permissions for the tests in this module systemProperty 'tests.security.manager', 'false' } + +if (project.inFipsJvm) { + // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit, + // rather than provide a long list of exclusions, disable the check on FIPS. + thirdPartyAudit.enabled = false +} diff --git a/docs/plugins/discovery-file.asciidoc b/docs/plugins/discovery-file.asciidoc index ad06cfc0cc5..4f2182da056 100644 --- a/docs/plugins/discovery-file.asciidoc +++ b/docs/plugins/discovery-file.asciidoc @@ -1,71 +1,14 @@ [[discovery-file]] === File-Based Discovery Plugin -The file-based discovery plugin uses a list of hosts/ports in a `unicast_hosts.txt` file -in the `config/discovery-file` directory for unicast discovery. +The functionality provided by the `discovery-file` plugin is now available in +Elasticsearch without requiring a plugin. This plugin still exists to ensure +backwards compatibility, but it will be removed in a future version. + +On installation, this plugin creates a file at +`$ES_PATH_CONF/discovery-file/unicast_hosts.txt` that comprises comments that +describe how to use it. It is preferable not to install this plugin and instead +to create this file, and its containing directory, using standard tools. :plugin_name: discovery-file include::install_remove.asciidoc[] - -[[discovery-file-usage]] [float] -==== Using the file-based discovery plugin - -The file-based discovery plugin provides the ability to specify the -unicast hosts list through a simple `unicast_hosts.txt` file that can -be dynamically updated at any time.
To enable, add the following in `elasticsearch.yml`: - -[source,yaml] ----- -discovery.zen.hosts_provider: file ----- - -This plugin simply provides a facility to supply the unicast hosts list for -zen discovery through an external file that can be updated at any time by a side process. - -For example, this gives a convenient mechanism for an Elasticsearch instance -that is run in docker containers to be dynamically supplied a list of IP -addresses to connect to for zen discovery when those IP addresses may not be -known at node startup. - -Note that the file-based discovery plugin is meant to augment the unicast -hosts list in `elasticsearch.yml` (if specified), not replace it. Therefore, -if there are valid unicast host entries in `discovery.zen.ping.unicast.hosts`, -they will be used in addition to those supplied in `unicast_hosts.txt`. - -Anytime a change is made to the `unicast_hosts.txt` file, even as Elasticsearch -continues to run, the new changes will be picked up by the plugin and the -new hosts list will be used for the next pinging round for master election. - -Upon installation of the plugin, a default `unicast_hosts.txt` file will -be found in the `$CONFIG_DIR/discovery-file` directory. This default file -will contain some comments about what the file should contain. All comments -for this file must appear on their lines starting with `#` (i.e. comments -cannot start in the middle of a line). - -[[discovery-file-format]] -[float] -==== unicast_hosts.txt file format - -The format of the file is to specify one unicast host entry per line. -Each unicast host entry consists of the host (host name or IP address) and -an optional transport port number. If the port number is specified, is must -come immediately after the host (on the same line) separated by a `:`. -If the port number is not specified, a default value of 9300 is used. - -For example, this is an example of `unicast_hosts.txt` for a cluster with -four nodes that participate in unicast discovery, some of which are not -running on the default port: - -[source,txt] ----------------------------------------------------------------- -10.10.10.5 -10.10.10.6:9305 -10.10.10.5:10005 -# an IPv6 address -[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301 ----------------------------------------------------------------- - -Host names are allowed instead of IP addresses (similar to -`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be -specified in brackets with the port coming after the brackets. diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index 82923257385..e3978e65f44 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -10,71 +10,66 @@ include::install_remove.asciidoc[] [[repository-gcs-usage]] ==== Getting started -The plugin uses the https://cloud.google.com/storage/docs/json_api/[Google Cloud Storage JSON API] (v1) -to connect to the Storage service. If this is the first time you use Google Cloud Storage, you first -need to connect to the https://console.cloud.google.com/[Google Cloud Platform Console] and create a new -project. Once your project is created, you must enable the Cloud Storage Service for your project. +The plugin uses the https://github.com/GoogleCloudPlatform/google-cloud-java/tree/master/google-cloud-clients/google-cloud-storage[Google Cloud Java Client for Storage] +to connect to the Storage service. 
If you are using +https://cloud.google.com/storage/[Google Cloud Storage] for the first time, you +must connect to the https://console.cloud.google.com/[Google Cloud Platform Console] +and create a new project. After your project is created, you must enable the +Cloud Storage Service for your project. [[repository-gcs-creating-bucket]] ===== Creating a Bucket -Google Cloud Storage service uses the concept of https://cloud.google.com/storage/docs/key-terms[Bucket] -as a container for all the data. Buckets are usually created using the -https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin will not automatically -create buckets. +The Google Cloud Storage service uses the concept of a +https://cloud.google.com/storage/docs/key-terms[bucket] as a container for all +the data. Buckets are usually created using the +https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin +does not automatically create buckets. To create a new bucket: -1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] -2. Select your project -3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser] -4. Click the "Create Bucket" button -5. Enter the name of the new bucket -6. Select a storage class -7. Select a location -8. Click the "Create" button +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. +2. Select your project. +3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]. +4. Click the *Create Bucket* button. +5. Enter the name of the new bucket. +6. Select a storage class. +7. Select a location. +8. Click the *Create* button. -The bucket should now be created. +For more detailed instructions, see the +https://cloud.google.com/storage/docs/quickstart-console#create_a_bucket[Google Cloud documentation]. [[repository-gcs-service-authentication]] ===== Service Authentication -The plugin supports two authentication modes: - -* The built-in <>. This mode is -recommended if your Elasticsearch node is running on a Compute Engine virtual machine. - -* Specifying <> credentials. - -[[repository-gcs-using-compute-engine]] -===== Using Compute Engine -When running on Compute Engine, the plugin use Google's built-in authentication mechanism to -authenticate on the Storage service. Compute Engine virtual machines are usually associated to a -default service account. This service account can be found in the VM instance details in the -https://console.cloud.google.com/compute/[Compute Engine console]. - -This is the default authentication mode and requires no configuration. - -NOTE: The Compute Engine VM must be allowed to use the Storage service. This can be done only at VM -creation time, when "Storage" access can be configured to "Read/Write" permission. Check your -instance details at the section "Cloud API access scopes". +The plugin must authenticate the requests it makes to the Google Cloud Storage +service. It is common for Google client libraries to employ a strategy named https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application[application default credentials]. +However, that strategy is **not** supported for use with Elasticsearch. The +plugin operates under the Elasticsearch process, which runs with the security +manager enabled. The security manager obstructs the "automatic" credential discovery. 
+Therefore, you must configure <<repository-gcs-using-service-account,service account>> +credentials even if you are using an environment that does not normally require +this configuration (such as Compute Engine, Kubernetes Engine or App Engine). [[repository-gcs-using-service-account]] ===== Using a Service Account -If your Elasticsearch node is not running on Compute Engine, or if you don't want to use Google's -built-in authentication mechanism, you can authenticate on the Storage service using a -https://cloud.google.com/iam/docs/overview#service_account[Service Account] file. +You have to obtain and provide https://cloud.google.com/iam/docs/overview#service_account[service account credentials] +manually. -To create a service account file: +For detailed information about generating JSON service account files, see the https://cloud.google.com/storage/docs/authentication?hl=en#service_accounts[Google Cloud documentation]. +Note that the PKCS12 format is not supported by this plugin. -1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] -2. Select your project -3. Got to the https://console.cloud.google.com/permissions[Permission] tab -4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab -5. Click on "Create service account" -6. Once created, select the new service account and download a JSON key file +Here is a summary of the steps: -A service account file looks like this: +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. +2. Select your project. +3. Go to the https://console.cloud.google.com/permissions[Permission] tab. +4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab. +5. Click *Create service account*. +6. After the account is created, select it and download a JSON key file. + +A JSON service account file looks like this: [source,js] ---- @@ -84,19 +79,26 @@ A service account file looks like this: "private_key_id": "...", "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", - "client_id": "..." + "client_id": "...", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://accounts.google.com/o/oauth2/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com" } ---- // NOTCONSOLE -This file must be stored in the {ref}/secure-settings.html[elasticsearch keystore], under a setting name -of the form `gcs.client.NAME.credentials_file`, where `NAME` is the name of the client configuration. -The default client name is `default`, but a different client name can be specified in repository -settings using `client`. +To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must add a setting name of the form `gcs.client.NAME.credentials_file`, where `NAME` +is the name of the client configuration for the repository. The implicit client +name is `default`, but a different client name can be specified in the +repository settings with the `client` key.
-For example, if specifying the credentials file in the keystore under -`gcs.client.my_alternate_client.credentials_file`, you can configure a repository to use these -credentials like this: +NOTE: Passing the file path via the GOOGLE_APPLICATION_CREDENTIALS environment +variable is **not** supported. + +For example, if you added a `gcs.client.my_alternate_client.credentials_file` +setting in the keystore, you can configure a repository to use those credentials +like this: [source,js] ---- @@ -113,19 +115,18 @@ PUT _snapshot/my_gcs_repository // TEST[skip:we don't have gcs setup while testing this] The `credentials_file` settings are {ref}/secure-settings.html#reloadable-secure-settings[reloadable]. -After you reload the settings, the internal `gcs` clients, used to transfer the -snapshot contents, will utilize the latest settings from the keystore. +After you reload the settings, the internal `gcs` clients, which are used to +transfer the snapshot contents, utilize the latest settings from the keystore. - -NOTE: In progress snapshot/restore jobs will not be preempted by a *reload* -of the client's `credentials_file` settings. They will complete using the client -as it was built when the operation started. +NOTE: Snapshot or restore jobs that are in progress are not preempted by a *reload* +of the client's `credentials_file` settings. They complete using the client as +it was built when the operation started. [[repository-gcs-client]] ==== Client Settings The client used to connect to Google Cloud Storage has a number of settings available. -Client setting names are of the form `gcs.client.CLIENT_NAME.SETTING_NAME` and specified +Client setting names are of the form `gcs.client.CLIENT_NAME.SETTING_NAME` and are specified inside `elasticsearch.yml`. The default client name looked up by a `gcs` repository is called `default`, but can be customized with the repository setting `client`. @@ -146,7 +147,7 @@ PUT _snapshot/my_gcs_repository // TEST[skip:we don't have gcs setup while testing this] Some settings are sensitive and must be stored in the -{ref}/secure-settings.html[elasticsearch keystore]. This is the case for the service account file: +{ref}/secure-settings.html[Elasticsearch keystore]. This is the case for the service account file: [source,sh] ---- @@ -185,7 +186,7 @@ are marked as `Secure`. `project_id`:: - The Google Cloud project id. This will be automatically infered from the credentials file but + The Google Cloud project id. This will be automatically inferred from the credentials file but can be specified explicitly. For example, it can be used to switch between projects when the same credentials are usable for both the production and the development projects. @@ -248,8 +249,8 @@ The following settings are supported: The service account used to access the bucket must have the "Writer" access to the bucket: -1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console] -2. Select your project -3. Got to the https://console.cloud.google.com/storage/browser[Storage Browser] -4. Select the bucket and "Edit bucket permission" -5. The service account must be configured as a "User" with "Writer" access +1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. +2. Select your project. +3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]. +4. Select the bucket and "Edit bucket permission". +5. The service account must be configured as a "User" with "Writer" access.
diff --git a/docs/reference/modules/discovery/zen.asciidoc b/docs/reference/modules/discovery/zen.asciidoc index f0f26a46659..e9be7aa52e8 100644 --- a/docs/reference/modules/discovery/zen.asciidoc +++ b/docs/reference/modules/discovery/zen.asciidoc @@ -1,13 +1,12 @@ [[modules-discovery-zen]] === Zen Discovery -The zen discovery is the built in discovery module for Elasticsearch and -the default. It provides unicast discovery, but can be extended to -support cloud environments and other forms of discovery. +Zen discovery is the built-in, default, discovery module for Elasticsearch. It +provides unicast and file-based discovery, and can be extended to support cloud +environments and other forms of discovery via plugins. -The zen discovery is integrated with other modules, for example, all -communication between nodes is done using the -<<modules-transport,transport>> module. +Zen discovery is integrated with other modules, for example, all communication +between nodes is done using the <<modules-transport,transport>> module. It is separated into several sub modules, which are explained below: @@ -15,86 +14,155 @@ It is separated into several sub modules, which are explained below: [float] [[ping]] ==== Ping -This is the process where a node uses the discovery mechanisms to find -other nodes. +This is the process where a node uses the discovery mechanisms to find other +nodes. + +[float] +[[discovery-seed-nodes]] +==== Seed nodes + +Zen discovery uses a list of _seed_ nodes in order to start off the discovery +process. At startup, or when electing a new master, Elasticsearch tries to +connect to each seed node in its list, and holds a gossip-like conversation with +them to find other nodes and to build a complete picture of the cluster. By +default there are two methods for configuring the list of seed nodes: _unicast_ +and _file-based_. It is recommended that the list of seed nodes comprises the +list of master-eligible nodes in the cluster. [float] [[unicast]] ===== Unicast -Unicast discovery requires a list of hosts to use that will act as gossip -routers. These hosts can be specified as hostnames or IP addresses; hosts -specified as hostnames are resolved to IP addresses during each round of -pinging. Note that if you are in an environment where DNS resolutions vary with -time, you might need to adjust your <<networkaddress-cache-ttl,JVM security settings>>. +Unicast discovery configures a static list of hosts for use as seed nodes. +These hosts can be specified as hostnames or IP addresses; hosts specified as +hostnames are resolved to IP addresses during each round of pinging. Note that +if you are in an environment where DNS resolutions vary with time, you might +need to adjust your <<networkaddress-cache-ttl,JVM security settings>>. -It is recommended that the unicast hosts list be maintained as the list of -master-eligible nodes in the cluster. +The list of hosts is set using the `discovery.zen.ping.unicast.hosts` static +setting. This is either an array of hosts or a comma-delimited string. Each +value should be in the form of `host:port` or `host` (where `port` defaults to +the setting `transport.profiles.default.port` falling back to +`transport.tcp.port` if not set). Note that IPv6 hosts must be bracketed. The +default for this setting is `127.0.0.1, [::1]`. -Unicast discovery provides the following settings with the `discovery.zen.ping.unicast` prefix: +Additionally, the `discovery.zen.ping.unicast.resolve_timeout` configures the +amount of time to wait for DNS lookups on each round of pinging. This is +specified as a <<time-units,time value>> and defaults to 5s.
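+
+For example, a static seed list could be declared in `elasticsearch.yml` as
+follows (a sketch; the addresses are illustrative and not part of this change):
+
+[source,yaml]
+----
+discovery.zen.ping.unicast.hosts:
+   - 192.168.1.10:9300
+   - 192.168.1.11
+   - "[::1]:9305"
+----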
-[cols="<,<",options="header",] -|======================================================================= -|Setting |Description -|`hosts` |Either an array setting or a comma delimited setting. Each - value should be in the form of `host:port` or `host` (where `port` defaults to the setting `transport.profiles.default.port` - falling back to `transport.tcp.port` if not set). Note that IPv6 hosts must be bracketed. Defaults to `127.0.0.1, [::1]` -|`hosts.resolve_timeout` |The amount of time to wait for DNS lookups on each round of pinging. Specified as -<>. Defaults to 5s. -|======================================================================= +Unicast discovery uses the <> module to perform the +discovery. -The unicast discovery uses the <> module to perform the discovery. +[float] +[[file-based-hosts-provider]] +===== File-based + +In addition to hosts provided by the static `discovery.zen.ping.unicast.hosts` +setting, it is possible to provide a list of hosts via an external file. +Elasticsearch reloads this file when it changes, so that the list of seed nodes +can change dynamically without needing to restart each node. For example, this +gives a convenient mechanism for an Elasticsearch instance that is run in a +Docker container to be dynamically supplied with a list of IP addresses to +connect to for Zen discovery when those IP addresses may not be known at node +startup. + +To enable file-based discovery, configure the `file` hosts provider as follows: + +[source,txt] +---------------------------------------------------------------- +discovery.zen.hosts_provider: file +---------------------------------------------------------------- + +Then create a file at `$ES_PATH_CONF/unicast_hosts.txt` in the format described +below. Any time a change is made to the `unicast_hosts.txt` file the new +changes will be picked up by Elasticsearch and the new hosts list will be used. + +Note that the file-based discovery plugin augments the unicast hosts list in +`elasticsearch.yml`: if there are valid unicast host entries in +`discovery.zen.ping.unicast.hosts` then they will be used in addition to those +supplied in `unicast_hosts.txt`. + +The `discovery.zen.ping.unicast.resolve_timeout` setting also applies to DNS +lookups for nodes specified by address via file-based discovery. This is +specified as a <> and defaults to 5s. + +The format of the file is to specify one node entry per line. Each node entry +consists of the host (host name or IP address) and an optional transport port +number. If the port number is specified, is must come immediately after the +host (on the same line) separated by a `:`. If the port number is not +specified, a default value of 9300 is used. + +For example, this is an example of `unicast_hosts.txt` for a cluster with four +nodes that participate in unicast discovery, some of which are not running on +the default port: + +[source,txt] +---------------------------------------------------------------- +10.10.10.5 +10.10.10.6:9305 +10.10.10.5:10005 +# an IPv6 address +[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301 +---------------------------------------------------------------- + +Host names are allowed instead of IP addresses (similar to +`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be specified in +brackets with the port coming after the brackets. + +It is also possible to add comments to this file. All comments must appear on +their lines starting with `#` (i.e. comments cannot start in the middle of a +line). 
[float] [[master-election]] ==== Master Election -As part of the ping process a master of the cluster is either -elected or joined to. This is done automatically. The -`discovery.zen.ping_timeout` (which defaults to `3s`) determines how long the node -will wait before deciding on starting an election or joining an existing cluster. -Three pings will be sent over this timeout interval. In case where no decision can be -reached after the timeout, the pinging process restarts. -In slow or congested networks, three seconds might not be enough for a node to become -aware of the other nodes in its environment before making an election decision. -Increasing the timeout should be done with care in that case, as it will slow down the -election process. -Once a node decides to join an existing formed cluster, it -will send a join request to the master (`discovery.zen.join_timeout`) -with a timeout defaulting at 20 times the ping timeout. +As part of the ping process a master of the cluster is either elected or joined +to. This is done automatically. The `discovery.zen.ping_timeout` (which defaults +to `3s`) determines how long the node will wait before deciding on starting an +election or joining an existing cluster. Three pings will be sent over this +timeout interval. In the case where no decision can be reached after the timeout, +the pinging process restarts. In slow or congested networks, three seconds +might not be enough for a node to become aware of the other nodes in its +environment before making an election decision. Increasing the timeout should +be done with care in that case, as it will slow down the election process. Once +a node decides to join an existing formed cluster, it will send a join request +to the master (`discovery.zen.join_timeout`) with a timeout defaulting to 20 +times the ping timeout. -When the master node stops or has encountered a problem, the cluster nodes -start pinging again and will elect a new master. This pinging round also -serves as a protection against (partial) network failures where a node may unjustly -think that the master has failed. In this case the node will simply hear from -other nodes about the currently active master. +When the master node stops or has encountered a problem, the cluster nodes start +pinging again and will elect a new master. This pinging round also serves as a +protection against (partial) network failures where a node may unjustly think +that the master has failed. In this case the node will simply hear from other +nodes about the currently active master. -If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from nodes that are not master -eligible (nodes where `node.master` is `false`) are ignored during master election; the default value is +If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from +nodes that are not master eligible (nodes where `node.master` is `false`) are +ignored during master election; the default value is `false`. + +Nodes can be excluded from becoming a master by setting `node.master` to `false`. -Nodes can be excluded from becoming a master by setting `node.master` to `false`. - -The `discovery.zen.minimum_master_nodes` sets the minimum -number of master eligible nodes that need to join a newly elected master in order for an election to -complete and for the elected node to accept its mastership. The same setting controls the minimum number of -active master eligible nodes that should be a part of any active cluster.
If this requirement is not met the -active master node will step down and a new master election will begin. +The `discovery.zen.minimum_master_nodes` setting sets the minimum number of master +eligible nodes that need to join a newly elected master in order for an election +to complete and for the elected node to accept its mastership. The same setting +controls the minimum number of active master eligible nodes that should be a +part of any active cluster. If this requirement is not met, the active master +node will step down and a new master election will begin. This setting must be set to a <> of your master eligible nodes. It is recommended to avoid having only two master eligible -nodes, since a quorum of two is two. Therefore, a loss of either master -eligible node will result in an inoperable cluster. +nodes, since a quorum of two is two. Therefore, a loss of either master eligible +node will result in an inoperable cluster. [float] [[fault-detection]] ==== Fault Detection -There are two fault detection processes running. The first is by the -master, to ping all the other nodes in the cluster and verify that they -are alive. And on the other end, each node pings to master to verify if -its still alive or an election process needs to be initiated. +There are two fault detection processes running. The first is run by the master, +to ping all the other nodes in the cluster and verify that they are alive. On +the other end, each node pings the master to verify whether it is still alive or +whether an election process needs to be initiated. The following settings control the fault detection process using the `discovery.zen.fd` prefix: @@ -116,19 +184,21 @@ considered failed. Defaults to `3`. The master node is the only node in a cluster that can make changes to the cluster state. The master node processes one cluster state update at a time, -applies the required changes and publishes the updated cluster state to all -the other nodes in the cluster. Each node receives the publish message, acknowledges -it, but does *not* yet apply it. If the master does not receive acknowledgement from -at least `discovery.zen.minimum_master_nodes` nodes within a certain time (controlled by -the `discovery.zen.commit_timeout` setting and defaults to 30 seconds) the cluster state -change is rejected. +applies the required changes and publishes the updated cluster state to all the +other nodes in the cluster. Each node receives the publish message, acknowledges +it, but does *not* yet apply it. If the master does not receive acknowledgement +from at least `discovery.zen.minimum_master_nodes` nodes within a certain time +(controlled by the `discovery.zen.commit_timeout` setting, which defaults to 30 +seconds) the cluster state change is rejected. -Once enough nodes have responded, the cluster state is committed and a message will -be sent to all the nodes. The nodes then proceed to apply the new cluster state to their -internal state. The master node waits for all nodes to respond, up to a timeout, before -going ahead processing the next updates in the queue. The `discovery.zen.publish_timeout` is -set by default to 30 seconds and is measured from the moment the publishing started. Both -timeout settings can be changed dynamically through the <> +Once enough nodes have responded, the cluster state is committed and a message +will be sent to all the nodes. The nodes then proceed to apply the new cluster +state to their internal state.
The master node waits for all nodes to respond, +up to a timeout, before going ahead and processing the next updates in the queue. +The `discovery.zen.publish_timeout` setting defaults to 30 seconds and is +measured from the moment the publishing started. Both timeout settings can be +changed dynamically through the <>. [float] [[no-master-block]] @@ -143,10 +213,14 @@ rejected when there is no active master. The `discovery.zen.no_master_block` setting has two valid options: [horizontal] -`all`:: All operations on the node--i.e. both read & writes--will be rejected. This also applies for api cluster state -read or write operations, like the get index settings, put mapping and cluster state api. -`write`:: (default) Write operations will be rejected. Read operations will succeed, based on the last known cluster configuration. -This may result in partial reads of stale data as this node may be isolated from the rest of the cluster. +`all`:: All operations on the node--i.e. both reads & writes--will be rejected. +This also applies to API-level cluster state read or write operations, like the +get index settings, put mapping and cluster state APIs. +`write`:: (default) Write operations will be rejected. Read operations will +succeed, based on the last known cluster configuration. This may result in +partial reads of stale data as this node may be isolated from the rest of the +cluster. -The `discovery.zen.no_master_block` setting doesn't apply to nodes-based apis (for example cluster stats, node info and -node stats apis). Requests to these apis will not be blocked and can run on any available node. +The `discovery.zen.no_master_block` setting doesn't apply to nodes-based APIs +(for example the cluster stats, node info and node stats APIs). Requests to +these APIs will not be blocked and can run on any available node.
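As a worked example of the quorum rule described above: with three master-eligible nodes, a quorum is `(3 / 2) + 1 = 2`. A sketch of the corresponding static settings, with illustrative values:

[source,txt]
----------------------------------------------------------------
# three master-eligible nodes => quorum of two
discovery.zen.minimum_master_nodes: 2

# optionally reject reads as well as writes while there is no master
discovery.zen.no_master_block: all
----------------------------------------------------------------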
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 0d4a9516871..28861d273a5 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 94161917d18..76d8f343e75 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-4.9-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-4.10-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=39e2d5803bbd5eaf6c8efe07067b0e5a00235e8c71318642b2ed262920b27721 +distributionSha256Sum=fc049dcbcb245d5892bebae143bd515a78f6a5a93cec99d489b312dc0ce4aad9 diff --git a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index e171daeb79b..3de0ae5117e 100644 --- a/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -255,6 +255,10 @@ public class JarHell { } private static void checkClass(Map clazzes, String clazz, Path jarpath) { + if (clazz.equals("module-info") || clazz.endsWith(".module-info")) { + // Ignore jigsaw module descriptions + return; + } Path previous = clazzes.put(clazz, jarpath); if (previous != null) { if (previous.equals(jarpath)) { diff --git a/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index e58268ef192..95c56f94ee4 100644 --- a/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -76,6 +76,28 @@ public class JarHellTests extends ESTestCase { } } + public void testModuleInfo() throws Exception { + Path dir = createTempDir(); + JarHell.checkJarHell( + asSet( + makeJar(dir, "foo.jar", null, "module-info.class"), + makeJar(dir, "bar.jar", null, "module-info.class") + ), + logger::debug + ); + } + + public void testModuleInfoPackage() throws Exception { + Path dir = createTempDir(); + JarHell.checkJarHell( + asSet( + makeJar(dir, "foo.jar", null, "foo/bar/module-info.class"), + makeJar(dir, "bar.jar", null, "foo/bar/module-info.class") + ), + logger::debug + ); + } + public void testDirsOnClasspath() throws Exception { Path dir1 = createTempDir(); Path dir2 = createTempDir(); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java index f5bf9cc9591..31c0ae8cc3d 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java @@ -28,6 +28,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; +import org.elasticsearch.script.ScriptService; import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException; import static org.elasticsearch.ingest.ConfigurationUtils.readBooleanProperty; @@ -96,6 +97,13 @@ public final class ForEachProcessor extends AbstractProcessor { } public static final class Factory implements Processor.Factory { + + private 
final ScriptService scriptService; + + Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + @Override public ForEachProcessor create(Map factories, String tag, Map config) throws Exception { @@ -107,7 +115,8 @@ public final class ForEachProcessor extends AbstractProcessor { throw newConfigurationException(TYPE, tag, "processor", "Must specify exactly one processor type"); } Map.Entry> entry = entries.iterator().next(); - Processor processor = ConfigurationUtils.readProcessor(factories, entry.getKey(), entry.getValue()); + Processor processor = + ConfigurationUtils.readProcessor(factories, scriptService, entry.getKey(), entry.getValue()); return new ForEachProcessor(tag, field, processor, ignoreMissing); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java index 1ed8b6058e6..8b048282814 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/IngestCommonPlugin.java @@ -72,7 +72,7 @@ public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPl processors.put(ConvertProcessor.TYPE, new ConvertProcessor.Factory()); processors.put(GsubProcessor.TYPE, new GsubProcessor.Factory()); processors.put(FailProcessor.TYPE, new FailProcessor.Factory(parameters.scriptService)); - processors.put(ForEachProcessor.TYPE, new ForEachProcessor.Factory()); + processors.put(ForEachProcessor.TYPE, new ForEachProcessor.Factory(parameters.scriptService)); processors.put(DateIndexNameProcessor.TYPE, new DateIndexNameProcessor.Factory(parameters.scriptService)); processors.put(SortProcessor.TYPE, new SortProcessor.Factory()); processors.put(GrokProcessor.TYPE, new GrokProcessor.Factory(GROK_PATTERNS, createGrokThreadWatchdog(parameters))); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java index f382ad8dcfb..7ab19c4147e 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ForEachProcessorFactoryTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.ingest.common; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.TestProcessor; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -30,14 +31,17 @@ import java.util.HashMap; import java.util.Map; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; public class ForEachProcessorFactoryTests extends ESTestCase { + private final ScriptService scriptService = mock(ScriptService.class); + public void testCreate() throws Exception { Processor processor = new TestProcessor(ingestDocument -> { }); Map registry = new HashMap<>(); registry.put("_name", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -53,7 +57,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase { Processor 
processor = new TestProcessor(ingestDocument -> { }); Map registry = new HashMap<>(); registry.put("_name", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -71,7 +75,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase { Map registry = new HashMap<>(); registry.put("_first", (r, t, c) -> processor); registry.put("_second", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); @@ -84,7 +88,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase { } public void testCreateWithNonExistingProcessorType() throws Exception { - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); config.put("processor", Collections.singletonMap("_name", Collections.emptyMap())); @@ -97,7 +101,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase { Processor processor = new TestProcessor(ingestDocument -> { }); Map registry = new HashMap<>(); registry.put("_name", (r, t, c) -> processor); - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("processor", Collections.singletonList(Collections.singletonMap("_name", Collections.emptyMap()))); Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(registry, null, config)); @@ -105,7 +109,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase { } public void testCreateWithMissingProcessor() { - ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(); + ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService); Map config = new HashMap<>(); config.put("field", "_field"); Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(Collections.emptyMap(), null, config)); diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml new file mode 100644 index 00000000000..532519c4ca0 --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/210_conditional_processor.yml @@ -0,0 +1,81 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test conditional processor fulfilled condition": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "bytes" : { + "if" : "ctx.conditional_field == 'bar'", + "field" : "bytes_source_field", + "target_field" : "bytes_target_field" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: {bytes_source_field: "1kb", conditional_field: "bar"} + + - do: + get: + index: test + type: test + id: 1 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.conditional_field: "bar" } + - match: { _source.bytes_target_field: 1024 } + 
+--- +"Test conditional processor unfulfilled condition": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "bytes" : { + "if" : "ctx.conditional_field == 'foo'", + "field" : "bytes_source_field", + "target_field" : "bytes_target_field" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + type: test + id: 1 + pipeline: "my_pipeline" + body: {bytes_source_field: "1kb", conditional_field: "bar"} + + - do: + get: + index: test + type: test + id: 1 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.conditional_field: "bar" } + - is_false: _source.bytes_target_field + diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index c38325edd14..7acbff6cb0b 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -61,9 +61,12 @@ public final class Whitelist { /** The {@link List} of all the whitelisted Painless classes. */ public final List whitelistClasses; + public final List whitelistBindings; + /** Standard constructor. All values must be not {@code null}. */ - public Whitelist(ClassLoader classLoader, List whitelistClasses) { + public Whitelist(ClassLoader classLoader, List whitelistClasses, List whitelistBindings) { this.classLoader = Objects.requireNonNull(classLoader); this.whitelistClasses = Collections.unmodifiableList(Objects.requireNonNull(whitelistClasses)); + this.whitelistBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistBindings)); } } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java new file mode 100644 index 00000000000..364dbbb09ca --- /dev/null +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistBinding.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.spi; + +import java.util.List; +import java.util.Objects; + +/** + * A binding represents a method call that stores state. Each binding class must have exactly one + * public constructor and one public method excluding those inherited directly from {@link Object}. + * The canonical type name parameters provided must match those of the constructor and method combined. 
+ * The constructor for a binding class will be called when the binding method is called for the first + * time at which point state may be stored for the arguments passed into the constructor. The method + * for a binding class will be called each time the binding method is called and may use the previously + * stored state. + */ +public class WhitelistBinding { + + /** Information about where this constructor was whitelisted from. */ + public final String origin; + + /** The Java class name this binding represents. */ + public final String targetJavaClassName; + + /** The method name for this binding. */ + public final String methodName; + + /** + * The canonical type name for the return type. + */ + public final String returnCanonicalTypeName; + + /** + * A {@link List} of {@link String}s that are the Painless type names for the parameters of the + * constructor which can be used to look up the Java constructor through reflection. + */ + public final List canonicalTypeNameParameters; + + /** Standard constructor. All values must be not {@code null}. */ + public WhitelistBinding(String origin, String targetJavaClassName, + String methodName, String returnCanonicalTypeName, List canonicalTypeNameParameters) { + + this.origin = Objects.requireNonNull(origin); + this.targetJavaClassName = Objects.requireNonNull(targetJavaClassName); + + this.methodName = Objects.requireNonNull(methodName); + this.returnCanonicalTypeName = Objects.requireNonNull(returnCanonicalTypeName); + this.canonicalTypeNameParameters = Objects.requireNonNull(canonicalTypeNameParameters); + } +} diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java index 0b216ae5c29..7b3eb75aa3e 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistClass.java @@ -62,9 +62,8 @@ public final class WhitelistClass { /** Standard constructor. All values must be not {@code null}. */ public WhitelistClass(String origin, String javaClassName, boolean noImport, - List whitelistConstructors, - List whitelistMethods, - List whitelistFields) { + List whitelistConstructors, List whitelistMethods, List whitelistFields) + { this.origin = Objects.requireNonNull(origin); this.javaClassName = Objects.requireNonNull(javaClassName); diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java index a4a0076626a..0279c82f1b6 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistLoader.java @@ -133,6 +133,7 @@ public final class WhitelistLoader { */ public static Whitelist loadFromResourceFiles(Class resource, String... filepaths) { List whitelistClasses = new ArrayList<>(); + List whitelistBindings = new ArrayList<>(); // Execute a single pass through the whitelist text files. This will gather all the // constructors, methods, augmented methods, and fields for each whitelisted class. 
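For orientation while reading the parser changes in the hunks below: the new `static` block that `loadFromResourceFiles` learns to accept pairs a method signature with a `bound_to` target class. A plausible whitelist-file entry, inferred from this parser and from the `BindingTest` class added elsewhere in this PR (its constructor consumes the first two `int` parameters and its method the trailing `int` and `double`), would be:

[source,txt]
----------------------------------------------------------------
static {
  int testAddWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingTest
}
----------------------------------------------------------------

A script would then call `testAddWithState` with four arguments; as the `addPainlessBinding` code later in this diff shows, the binding is keyed by the method name plus the combined arity of the constructor and method parameters.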
@@ -141,8 +142,9 @@ public final class WhitelistLoader { int number = -1; try (LineNumberReader reader = new LineNumberReader( - new InputStreamReader(resource.getResourceAsStream(filepath), StandardCharsets.UTF_8))) { + new InputStreamReader(resource.getResourceAsStream(filepath), StandardCharsets.UTF_8))) { + String parseType = null; String whitelistClassOrigin = null; String javaClassName = null; boolean noImport = false; @@ -165,7 +167,11 @@ public final class WhitelistLoader { // Ensure the final token of the line is '{'. if (line.endsWith("{") == false) { throw new IllegalArgumentException( - "invalid class definition: failed to parse class opening bracket [" + line + "]"); + "invalid class definition: failed to parse class opening bracket [" + line + "]"); + } + + if (parseType != null) { + throw new IllegalArgumentException("invalid definition: cannot embed class definition [" + line + "]"); } // Parse the Java class name. @@ -178,6 +184,7 @@ public final class WhitelistLoader { throw new IllegalArgumentException("invalid class definition: failed to parse class name [" + line + "]"); } + parseType = "class"; whitelistClassOrigin = "[" + filepath + "]:[" + number + "]"; javaClassName = tokens[0]; @@ -185,34 +192,117 @@ public final class WhitelistLoader { whitelistConstructors = new ArrayList<>(); whitelistMethods = new ArrayList<>(); whitelistFields = new ArrayList<>(); - - // Handle the end of a class, by creating a new WhitelistClass with all the previously gathered - // constructors, methods, augmented methods, and fields, and adding it to the list of whitelisted classes. - // Expects the following format: '}' '\n' - } else if (line.equals("}")) { - if (javaClassName == null) { - throw new IllegalArgumentException("invalid class definition: extraneous closing bracket"); + } else if (line.startsWith("static ")) { + // Ensure the final token of the line is '{'. + if (line.endsWith("{") == false) { + throw new IllegalArgumentException( + "invalid static definition: failed to parse static opening bracket [" + line + "]"); } - whitelistClasses.add(new WhitelistClass(whitelistClassOrigin, javaClassName, noImport, - whitelistConstructors, whitelistMethods, whitelistFields)); + if (parseType != null) { + throw new IllegalArgumentException("invalid definition: cannot embed static definition [" + line + "]"); + } - // Set all the variables to null to ensure a new class definition is found before other parsable values. - whitelistClassOrigin = null; - javaClassName = null; - noImport = false; - whitelistConstructors = null; - whitelistMethods = null; - whitelistFields = null; + parseType = "static"; - // Handle all other valid cases. - } else { + // Handle the end of a definition and reset all previously gathered values. + // Expects the following format: '}' '\n' + } else if (line.equals("}")) { + if (parseType == null) { + throw new IllegalArgumentException("invalid definition: extraneous closing bracket"); + } + + // Create a new WhitelistClass with all the previously gathered constructors, methods, + // augmented methods, and fields, and add it to the list of whitelisted classes. + if ("class".equals(parseType)) { + whitelistClasses.add(new WhitelistClass(whitelistClassOrigin, javaClassName, noImport, + whitelistConstructors, whitelistMethods, whitelistFields)); + + whitelistClassOrigin = null; + javaClassName = null; + noImport = false; + whitelistConstructors = null; + whitelistMethods = null; + whitelistFields = null; + } + + // Reset the parseType. 
+ parseType = null; + + // Handle static definition types. + // Expects the following format: ID ID '(' ( ID ( ',' ID )* )? ')' 'bound_to' ID '\n' + } else if ("static".equals(parseType)) { + // Mark the origin of this parsable object. + String origin = "[" + filepath + "]:[" + number + "]"; + + // Parse the tokens prior to the method parameters. + int parameterStartIndex = line.indexOf('('); + + if (parameterStartIndex == -1) { + throw new IllegalArgumentException( + "illegal static definition: start of method parameters not found [" + line + "]"); + } + + String[] tokens = line.substring(0, parameterStartIndex).trim().split("\\s+"); + + String methodName; + + // Based on the number of tokens, look up the Java method name. + if (tokens.length == 2) { + methodName = tokens[1]; + } else { + throw new IllegalArgumentException("invalid method definition: unexpected format [" + line + "]"); + } + + String returnCanonicalTypeName = tokens[0]; + + // Parse the method parameters. + int parameterEndIndex = line.indexOf(')'); + + if (parameterEndIndex == -1) { + throw new IllegalArgumentException( + "illegal static definition: end of method parameters not found [" + line + "]"); + } + + String[] canonicalTypeNameParameters = + line.substring(parameterStartIndex + 1, parameterEndIndex).replaceAll("\\s+", "").split(","); + + // Handle the case for a method with no parameters. + if ("".equals(canonicalTypeNameParameters[0])) { + canonicalTypeNameParameters = new String[0]; + } + + // Parse the static type and class. + tokens = line.substring(parameterEndIndex + 1).trim().split("\\s+"); + + String staticType; + String targetJavaClassName; + + // Based on the number of tokens, look up the type and class. + if (tokens.length == 2) { + staticType = tokens[0]; + targetJavaClassName = tokens[1]; + } else { + throw new IllegalArgumentException("invalid static definition: unexpected format [" + line + "]"); + } + + // Check the static type is valid. + if ("bound_to".equals(staticType) == false) { + throw new IllegalArgumentException( + "invalid static definition: unexpected static type [" + staticType + "] [" + line + "]"); + } + + whitelistBindings.add(new WhitelistBinding(origin, targetJavaClassName, + methodName, returnCanonicalTypeName, Arrays.asList(canonicalTypeNameParameters))); + + // Handle class definition types. + } else if ("class".equals(parseType)) { // Mark the origin of this parsable object. String origin = "[" + filepath + "]:[" + number + "]"; // Ensure we have a defined class before adding any constructors, methods, augmented methods, or fields. - if (javaClassName == null) { - throw new IllegalArgumentException("invalid object definition: expected a class name [" + line + "]"); + if (parseType == null) { + throw new IllegalArgumentException("invalid definition: expected one of ['class', 'static'] [" + line + "]"); } // Handle the case for a constructor definition. @@ -221,7 +311,7 @@ public final class WhitelistLoader { // Ensure the final token of the line is ')'. if (line.endsWith(")") == false) { throw new IllegalArgumentException( - "invalid constructor definition: expected a closing parenthesis [" + line + "]"); + "invalid constructor definition: expected a closing parenthesis [" + line + "]"); } // Parse the constructor parameters. @@ -234,34 +324,34 @@ public final class WhitelistLoader { whitelistConstructors.add(new WhitelistConstructor(origin, Arrays.asList(tokens))); - // Handle the case for a method or augmented method definition. - // Expects the following format: ID ID? 
ID '(' ( ID ( ',' ID )* )? ')' '\n' + // Handle the case for a method or augmented method definition. + // Expects the following format: ID ID? ID '(' ( ID ( ',' ID )* )? ')' '\n' } else if (line.contains("(")) { // Ensure the final token of the line is ')'. if (line.endsWith(")") == false) { throw new IllegalArgumentException( - "invalid method definition: expected a closing parenthesis [" + line + "]"); + "invalid method definition: expected a closing parenthesis [" + line + "]"); } // Parse the tokens prior to the method parameters. int parameterIndex = line.indexOf('('); - String[] tokens = line.trim().substring(0, parameterIndex).split("\\s+"); + String[] tokens = line.substring(0, parameterIndex).trim().split("\\s+"); - String javaMethodName; + String methodName; String javaAugmentedClassName; // Based on the number of tokens, look up the Java method name and if provided the Java augmented class. if (tokens.length == 2) { - javaMethodName = tokens[1]; + methodName = tokens[1]; javaAugmentedClassName = null; } else if (tokens.length == 3) { - javaMethodName = tokens[2]; + methodName = tokens[2]; javaAugmentedClassName = tokens[1]; } else { throw new IllegalArgumentException("invalid method definition: unexpected format [" + line + "]"); } - String painlessReturnTypeName = tokens[0]; + String returnCanonicalTypeName = tokens[0]; // Parse the method parameters. tokens = line.substring(parameterIndex + 1, line.length() - 1).replaceAll("\\s+", "").split(","); @@ -271,11 +361,11 @@ public final class WhitelistLoader { tokens = new String[0]; } - whitelistMethods.add(new WhitelistMethod(origin, javaAugmentedClassName, javaMethodName, - painlessReturnTypeName, Arrays.asList(tokens))); + whitelistMethods.add(new WhitelistMethod(origin, javaAugmentedClassName, methodName, + returnCanonicalTypeName, Arrays.asList(tokens))); - // Handle the case for a field definition. - // Expects the following format: ID ID '\n' + // Handle the case for a field definition. + // Expects the following format: ID ID '\n' } else { // Parse the field tokens. String[] tokens = line.split("\\s+"); @@ -287,20 +377,23 @@ public final class WhitelistLoader { whitelistFields.add(new WhitelistField(origin, tokens[1], tokens[0])); } + } else { + throw new IllegalArgumentException("invalid definition: unable to parse line [" + line + "]"); } } // Ensure all classes end with a '}' token before the end of the file. if (javaClassName != null) { - throw new IllegalArgumentException("invalid class definition: expected closing bracket"); + throw new IllegalArgumentException("invalid definition: expected closing bracket"); } } catch (Exception exception) { throw new RuntimeException("error in [" + filepath + "] at line [" + number + "]", exception); } } + ClassLoader loader = AccessController.doPrivileged((PrivilegedAction)resource::getClassLoader); - return new Whitelist(loader, whitelistClasses); + return new Whitelist(loader, whitelistClasses, whitelistBindings); } private WhitelistLoader() {} diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java index 5cd023a3591..f450ee0238d 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/WhitelistMethod.java @@ -67,7 +67,8 @@ public class WhitelistMethod { * is augmented as described in the class documentation. 
*/ public WhitelistMethod(String origin, String augmentedCanonicalClassName, String methodName, - String returnCanonicalTypeName, List canonicalTypeNameParameters) { + String returnCanonicalTypeName, List canonicalTypeNameParameters) { + this.origin = Objects.requireNonNull(origin); this.augmentedCanonicalClassName = augmentedCanonicalClassName; this.methodName = methodName; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java new file mode 100644 index 00000000000..fc2a10891f6 --- /dev/null +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/BindingTest.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +public class BindingTest { + public int state; + + public BindingTest(int state0, int state1) { + this.state = state0 + state1; + } + + public int testAddWithState(int istateless, double dstateless) { + return istateless + state + (int)dstateless; + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java index 83eb74d827f..d18cf2780cf 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Globals.java @@ -31,6 +31,7 @@ import java.util.Map; public class Globals { private final Map syntheticMethods = new HashMap<>(); private final Map constantInitializers = new HashMap<>(); + private final Map> bindings = new HashMap<>(); private final BitSet statements; /** Create a new Globals from the set of statement boundaries */ @@ -54,7 +55,15 @@ public class Globals { throw new IllegalStateException("constant initializer: " + constant.name + " already exists"); } } - + + /** Adds a new binding to be written as a local variable */ + public String addBinding(Class type) { + String name = "$binding$" + bindings.size(); + bindings.put(name, type); + + return name; + } + /** Returns the current synthetic methods */ public Map getSyntheticMethods() { return syntheticMethods; @@ -64,7 +73,12 @@ public class Globals { public Map getConstantInitializers() { return constantInitializers; } - + + /** Returns the current bindings */ + public Map> getBindings() { + return bindings; + } + /** Returns the set of statement boundaries */ public BitSet getStatements() { return statements; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java new file mode 100644 index 00000000000..41178dd5d75 --- /dev/null +++ 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessBinding.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless.lookup; + +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.util.List; + +public class PainlessBinding { + + public final Constructor javaConstructor; + public final Method javaMethod; + + public final Class returnType; + public final List> typeParameters; + + PainlessBinding(Constructor javaConstructor, Method javaMethod, Class returnType, List> typeParameters) { + this.javaConstructor = javaConstructor; + this.javaMethod = javaMethod; + + this.returnType = returnType; + this.typeParameters = typeParameters; + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java index 50bb79dcfbd..f5d6c97bb2f 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClass.java @@ -24,6 +24,7 @@ import java.util.Collections; import java.util.Map; public final class PainlessClass { + public final Map constructors; public final Map staticMethods; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java index a61215e9ed7..92100d1bda0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessClassBuilder.java @@ -24,6 +24,7 @@ import java.util.HashMap; import java.util.Map; final class PainlessClassBuilder { + final Map constructors; final Map staticMethods; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java index 76597c1a29d..a3dc6c8122b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessConstructor.java @@ -25,6 +25,7 @@ import java.lang.reflect.Constructor; import java.util.List; public class PainlessConstructor { + public final Constructor javaConstructor; public final List> typeParameters; public final MethodHandle methodHandle; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java 
b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java index a55d6c3730e..9567e97331c 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessField.java @@ -23,6 +23,7 @@ import java.lang.invoke.MethodHandle; import java.lang.reflect.Field; public final class PainlessField { + public final Field javaField; public final Class typeParameter; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java index 55855a3cb1e..2d6ed3e361d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java @@ -37,12 +37,17 @@ public final class PainlessLookup { private final Map> canonicalClassNamesToClasses; private final Map, PainlessClass> classesToPainlessClasses; - PainlessLookup(Map> canonicalClassNamesToClasses, Map, PainlessClass> classesToPainlessClasses) { + private final Map painlessMethodKeysToPainlessBindings; + + PainlessLookup(Map> canonicalClassNamesToClasses, Map, PainlessClass> classesToPainlessClasses, + Map painlessMethodKeysToPainlessBindings) { Objects.requireNonNull(canonicalClassNamesToClasses); Objects.requireNonNull(classesToPainlessClasses); this.canonicalClassNamesToClasses = Collections.unmodifiableMap(canonicalClassNamesToClasses); this.classesToPainlessClasses = Collections.unmodifiableMap(classesToPainlessClasses); + + this.painlessMethodKeysToPainlessBindings = Collections.unmodifiableMap(painlessMethodKeysToPainlessBindings); } public boolean isValidCanonicalClassName(String canonicalClassName) { @@ -162,6 +167,14 @@ public final class PainlessLookup { return painlessField; } + public PainlessBinding lookupPainlessBinding(String methodName, int arity) { + Objects.requireNonNull(methodName); + + String painlessMethodKey = buildPainlessMethodKey(methodName, arity); + + return painlessMethodKeysToPainlessBindings.get(painlessMethodKey); + } + public PainlessMethod lookupFunctionalInterfacePainlessMethod(Class targetClass) { PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java index c8353b54c9f..a64814f8661 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookupBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless.lookup; import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.painless.spi.WhitelistBinding; import org.elasticsearch.painless.spi.WhitelistClass; import org.elasticsearch.painless.spi.WhitelistConstructor; import org.elasticsearch.painless.spi.WhitelistField; @@ -52,11 +53,11 @@ public final class PainlessLookupBuilder { private static class PainlessConstructorCacheKey { - private final Class targetType; + private final Class targetClass; private final List> typeParameters; - private PainlessConstructorCacheKey(Class targetType, List> typeParameters) { - this.targetType = targetType; + private PainlessConstructorCacheKey(Class 
targetClass, List> typeParameters) { + this.targetClass = targetClass; this.typeParameters = Collections.unmodifiableList(typeParameters); } @@ -72,25 +73,27 @@ public final class PainlessLookupBuilder { PainlessConstructorCacheKey that = (PainlessConstructorCacheKey)object; - return Objects.equals(targetType, that.targetType) && + return Objects.equals(targetClass, that.targetClass) && Objects.equals(typeParameters, that.typeParameters); } @Override public int hashCode() { - return Objects.hash(targetType, typeParameters); + return Objects.hash(targetClass, typeParameters); } } private static class PainlessMethodCacheKey { - private final Class targetType; + private final Class targetClass; private final String methodName; + private final Class returnType; private final List> typeParameters; - private PainlessMethodCacheKey(Class targetType, String methodName, List> typeParameters) { - this.targetType = targetType; + private PainlessMethodCacheKey(Class targetClass, String methodName, Class returnType, List> typeParameters) { + this.targetClass = targetClass; this.methodName = methodName; + this.returnType = returnType; this.typeParameters = Collections.unmodifiableList(typeParameters); } @@ -106,25 +109,26 @@ public final class PainlessLookupBuilder { PainlessMethodCacheKey that = (PainlessMethodCacheKey)object; - return Objects.equals(targetType, that.targetType) && + return Objects.equals(targetClass, that.targetClass) && Objects.equals(methodName, that.methodName) && + Objects.equals(returnType, that.returnType) && Objects.equals(typeParameters, that.typeParameters); } @Override public int hashCode() { - return Objects.hash(targetType, methodName, typeParameters); + return Objects.hash(targetClass, methodName, returnType, typeParameters); } } private static class PainlessFieldCacheKey { - private final Class targetType; + private final Class targetClass; private final String fieldName; private final Class typeParameter; - private PainlessFieldCacheKey(Class targetType, String fieldName, Class typeParameter) { - this.targetType = targetType; + private PainlessFieldCacheKey(Class targetClass, String fieldName, Class typeParameter) { + this.targetClass = targetClass; this.fieldName = fieldName; this.typeParameter = typeParameter; } @@ -141,20 +145,61 @@ public final class PainlessLookupBuilder { PainlessFieldCacheKey that = (PainlessFieldCacheKey) object; - return Objects.equals(targetType, that.targetType) && + return Objects.equals(targetClass, that.targetClass) && Objects.equals(fieldName, that.fieldName) && Objects.equals(typeParameter, that.typeParameter); } @Override public int hashCode() { - return Objects.hash(targetType, fieldName, typeParameter); + return Objects.hash(targetClass, fieldName, typeParameter); } } - private static final Map painlessConstuctorCache = new HashMap<>(); - private static final Map painlessMethodCache = new HashMap<>(); - private static final Map painlessFieldCache = new HashMap<>(); + private static class PainlessBindingCacheKey { + + private final Class targetClass; + private final String methodName; + private final Class methodReturnType; + private final List> methodTypeParameters; + + private PainlessBindingCacheKey(Class targetClass, + String methodName, Class returnType, List> typeParameters) { + + this.targetClass = targetClass; + this.methodName = methodName; + this.methodReturnType = returnType; + this.methodTypeParameters = Collections.unmodifiableList(typeParameters); + } + + @Override + public boolean equals(Object object) { + if (this == 
object) { + return true; + } + + if (object == null || getClass() != object.getClass()) { + return false; + } + + PainlessBindingCacheKey that = (PainlessBindingCacheKey)object; + + return Objects.equals(targetClass, that.targetClass) && + Objects.equals(methodName, that.methodName) && + Objects.equals(methodReturnType, that.methodReturnType) && + Objects.equals(methodTypeParameters, that.methodTypeParameters); + } + + @Override + public int hashCode() { + return Objects.hash(targetClass, methodName, methodReturnType, methodTypeParameters); + } + } + + private static final Map<PainlessConstructorCacheKey, PainlessConstructor> painlessConstructorCache = new HashMap<>(); + private static final Map<PainlessMethodCacheKey, PainlessMethod> painlessMethodCache = new HashMap<>(); + private static final Map<PainlessFieldCacheKey, PainlessField> painlessFieldCache = new HashMap<>(); + private static final Map<PainlessBindingCacheKey, PainlessBinding> painlessBindingCache = new HashMap<>(); private static final Pattern CLASS_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$"); @@ -197,6 +242,14 @@ public final class PainlessLookupBuilder { targetCanonicalClassName, whitelistField.fieldName, whitelistField.canonicalTypeNameParameter); } } + + for (WhitelistBinding whitelistBinding : whitelist.whitelistBindings) { + origin = whitelistBinding.origin; + painlessLookupBuilder.addPainlessBinding( + whitelist.classLoader, whitelistBinding.targetJavaClassName, + whitelistBinding.methodName, whitelistBinding.returnCanonicalTypeName, + whitelistBinding.canonicalTypeNameParameters); + } } } catch (Exception exception) { throw new IllegalArgumentException("error loading whitelist(s) " + origin, exception); @@ -208,9 +261,13 @@ public final class PainlessLookupBuilder { private final Map<String, Class<?>> canonicalClassNamesToClasses; private final Map<Class<?>, PainlessClassBuilder> classesToPainlessClassBuilders; + private final Map<String, PainlessBinding> painlessMethodKeysToPainlessBindings; + public PainlessLookupBuilder() { canonicalClassNamesToClasses = new HashMap<>(); classesToPainlessClassBuilders = new HashMap<>(); + + painlessMethodKeysToPainlessBindings = new HashMap<>(); } private Class<?> canonicalTypeNameToType(String canonicalTypeName) { @@ -392,7 +449,7 @@ public final class PainlessLookupBuilder { MethodType methodType = methodHandle.type(); - painlessConstructor = painlessConstuctorCache.computeIfAbsent( + painlessConstructor = painlessConstructorCache.computeIfAbsent( new PainlessConstructorCacheKey(targetClass, typeParameters), key -> new PainlessConstructor(javaConstructor, typeParameters, methodHandle, methodType) ); @@ -439,7 +496,7 @@ public final class PainlessLookupBuilder { Class<?> typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); if (typeParameter == null) { - throw new IllegalArgumentException("parameter type [" + canonicalTypeNameParameter + "] not found for method " + + throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for method " + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); } @@ -449,7 +506,7 @@ public final class PainlessLookupBuilder { Class<?> returnType = canonicalTypeNameToType(returnCanonicalTypeName); if (returnType == null) { - throw new IllegalArgumentException("parameter type [" + returnCanonicalTypeName + "] not found for method " + + throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for method " + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); } @@ -548,7 +605,7 @@ public
final class PainlessLookupBuilder { MethodType methodType = methodHandle.type(); painlessMethod = painlessMethodCache.computeIfAbsent( - new PainlessMethodCacheKey(targetClass, methodName, typeParameters), + new PainlessMethodCacheKey(targetClass, methodName, returnType, typeParameters), key -> new PainlessMethod(javaMethod, targetClass, returnType, typeParameters, methodHandle, methodType)); painlessClassBuilder.staticMethods.put(painlessMethodKey, painlessMethod); @@ -588,7 +645,7 @@ public final class PainlessLookupBuilder { MethodType methodType = methodHandle.type(); painlessMethod = painlessMethodCache.computeIfAbsent( - new PainlessMethodCacheKey(targetClass, methodName, typeParameters), + new PainlessMethodCacheKey(targetClass, methodName, returnType, typeParameters), key -> new PainlessMethod(javaMethod, targetClass, returnType, typeParameters, methodHandle, methodType)); painlessClassBuilder.methods.put(painlessMethodKey, painlessMethod); @@ -731,6 +788,183 @@ public final class PainlessLookupBuilder { } } + public void addPainlessBinding(ClassLoader classLoader, String targetJavaClassName, + String methodName, String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) { + + Objects.requireNonNull(classLoader); + Objects.requireNonNull(targetJavaClassName); + Objects.requireNonNull(methodName); + Objects.requireNonNull(returnCanonicalTypeName); + Objects.requireNonNull(canonicalTypeNameParameters); + + Class<?> targetClass; + + try { + targetClass = Class.forName(targetJavaClassName, true, classLoader); + } catch (ClassNotFoundException cnfe) { + throw new IllegalArgumentException("class [" + targetJavaClassName + "] not found", cnfe); + } + + String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); + List<Class<?>> typeParameters = new ArrayList<>(canonicalTypeNameParameters.size()); + + for (String canonicalTypeNameParameter : canonicalTypeNameParameters) { + Class<?> typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter); + + if (typeParameter == null) { + throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); + } + + typeParameters.add(typeParameter); + } + + Class<?> returnType = canonicalTypeNameToType(returnCanonicalTypeName); + + if (returnType == null) { + throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for binding " + + "[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]"); + } + + addPainlessBinding(targetClass, methodName, returnType, typeParameters); + } + + public void addPainlessBinding(Class<?> targetClass, String methodName, Class<?> returnType, List<Class<?>> typeParameters) { + + Objects.requireNonNull(targetClass); + Objects.requireNonNull(methodName); + Objects.requireNonNull(returnType); + Objects.requireNonNull(typeParameters); + + if (targetClass == def.class) { + throw new IllegalArgumentException("cannot add binding as reserved class [" + DEF_CLASS_NAME + "]"); + } + + String targetCanonicalClassName = typeToCanonicalTypeName(targetClass); + + Constructor<?>[] javaConstructors = targetClass.getConstructors(); + Constructor<?> javaConstructor = null; + + for (Constructor<?> eachJavaConstructor : javaConstructors) { + if (eachJavaConstructor.getDeclaringClass() == targetClass) { + if (javaConstructor != null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple
constructors"); + } + + javaConstructor = eachJavaConstructor; + } + } + + if (javaConstructor == null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one constructor"); + } + + int constructorTypeParametersSize = javaConstructor.getParameterCount(); + + for (int typeParameterIndex = 0; typeParameterIndex < constructorTypeParametersSize; ++typeParameterIndex) { + Class<?> typeParameter = typeParameters.get(typeParameterIndex); + + if (isValidType(typeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + Class<?> javaTypeParameter = javaConstructor.getParameterTypes()[typeParameterIndex]; + + if (isValidType(javaTypeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + if (javaTypeParameter != typeToJavaType(typeParameter)) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " + + "does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + } + + if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) { + throw new IllegalArgumentException( + "invalid method name [" + methodName + "] for binding [" + targetCanonicalClassName + "]."); + } + + Method[] javaMethods = targetClass.getMethods(); + Method javaMethod = null; + + for (Method eachJavaMethod : javaMethods) { + if (eachJavaMethod.getDeclaringClass() == targetClass) { + if (javaMethod != null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple methods"); + } + + javaMethod = eachJavaMethod; + } + } + + if (javaMethod == null) { + throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one method"); + } + + int methodTypeParametersSize = javaMethod.getParameterCount(); + + for (int typeParameterIndex = 0; typeParameterIndex < methodTypeParametersSize; ++typeParameterIndex) { + Class<?> typeParameter = typeParameters.get(constructorTypeParametersSize + typeParameterIndex); + + if (isValidType(typeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + Class<?> javaTypeParameter = javaMethod.getParameterTypes()[typeParameterIndex]; + + if (isValidType(javaTypeParameter) == false) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " + + "for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + if (javaTypeParameter != typeToJavaType(typeParameter)) { + throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " + + "does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]"); + } + } + + if
(javaMethod.getReturnType() != typeToJavaType(returnType)) { + throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(javaMethod.getReturnType()) + "] " + + "does not match the specified returned type [" + typeToCanonicalTypeName(returnType) + "] " + + "for binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " + + typesToCanonicalTypeNames(typeParameters) + "]"); + } + + String painlessMethodKey = buildPainlessMethodKey(methodName, constructorTypeParametersSize + methodTypeParametersSize); + PainlessBinding painlessBinding = painlessMethodKeysToPainlessBindings.get(painlessMethodKey); + + if (painlessBinding == null) { + Constructor<?> finalJavaConstructor = javaConstructor; + Method finalJavaMethod = javaMethod; + + painlessBinding = painlessBindingCache.computeIfAbsent( + new PainlessBindingCacheKey(targetClass, methodName, returnType, typeParameters), + key -> new PainlessBinding(finalJavaConstructor, finalJavaMethod, returnType, typeParameters)); + + painlessMethodKeysToPainlessBindings.put(painlessMethodKey, painlessBinding); + } else if (painlessBinding.javaConstructor.equals(javaConstructor) == false || + painlessBinding.javaMethod.equals(javaMethod) == false || + painlessBinding.returnType != returnType || + painlessBinding.typeParameters.equals(typeParameters) == false) { + throw new IllegalArgumentException("cannot have bindings " + + "[[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + + "[" + typeToCanonicalTypeName(returnType) + "], " + + typesToCanonicalTypeNames(typeParameters) + "] and " + + "[[" + targetCanonicalClassName + "], " + + "[" + methodName + "], " + + "[" + typeToCanonicalTypeName(painlessBinding.returnType) + "], " + + typesToCanonicalTypeNames(painlessBinding.typeParameters) + "] and " + + "with the same name and arity but different constructors or methods"); + } + } + public PainlessLookup build() { copyPainlessClassMembers(); cacheRuntimeHandles(); @@ -742,7 +976,7 @@ public final class PainlessLookupBuilder { classesToPainlessClasses.put(painlessClassBuilderEntry.getKey(), painlessClassBuilderEntry.getValue().build()); } - return new PainlessLookup(canonicalClassNamesToClasses, classesToPainlessClasses); + return new PainlessLookup(canonicalClassNamesToClasses, classesToPainlessClasses, painlessMethodKeysToPainlessBindings); } private void copyPainlessClassMembers() { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java index 9dd143a4028..89462170ae5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessMethod.java @@ -26,6 +26,7 @@ import java.util.Collections; import java.util.List; public class PainlessMethod { + public final Method javaMethod; public final Class<?> targetClass; public final Class<?> returnType; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java index 1f9973df192..8ae6ad9723d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ECallLocal.java @@ -24,8 +24,12 @@ import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Locals.LocalMethod; import
org.elasticsearch.painless.Location; import org.elasticsearch.painless.MethodWriter; +import org.elasticsearch.painless.lookup.PainlessBinding; +import org.objectweb.asm.Label; +import org.objectweb.asm.Type; import org.objectweb.asm.commons.Method; +import java.util.ArrayList; import java.util.List; import java.util.Objects; import java.util.Set; @@ -41,6 +45,7 @@ public final class ECallLocal extends AExpression { private final List arguments; private LocalMethod method = null; + private PainlessBinding binding = null; public ECallLocal(Location location, String name, List arguments) { super(location); @@ -60,32 +65,71 @@ public final class ECallLocal extends AExpression { void analyze(Locals locals) { method = locals.getMethod(name, arguments.size()); + if (method == null) { - throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments.")); + binding = locals.getPainlessLookup().lookupPainlessBinding(name, arguments.size()); + + if (binding == null) { + throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments.")); + } } + List> typeParameters = new ArrayList<>(method == null ? binding.typeParameters : method.typeParameters); + for (int argument = 0; argument < arguments.size(); ++argument) { AExpression expression = arguments.get(argument); - expression.expected = method.typeParameters.get(argument); + expression.expected = typeParameters.get(argument); expression.internal = true; expression.analyze(locals); arguments.set(argument, expression.cast(locals)); } statement = true; - actual = method.returnType; + actual = method == null ? binding.returnType : method.returnType; } @Override void write(MethodWriter writer, Globals globals) { writer.writeDebugInfo(location); - for (AExpression argument : arguments) { - argument.write(writer, globals); - } + if (method == null) { + String name = globals.addBinding(binding.javaConstructor.getDeclaringClass()); + Type type = Type.getType(binding.javaConstructor.getDeclaringClass()); + int javaConstructorParameterCount = binding.javaConstructor.getParameterCount(); - writer.invokeStatic(CLASS_TYPE, new Method(method.name, method.methodType.toMethodDescriptorString())); + Label nonNull = new Label(); + + writer.loadThis(); + writer.getField(CLASS_TYPE, name, type); + writer.ifNonNull(nonNull); + writer.loadThis(); + writer.newInstance(type); + writer.dup(); + + for (int argument = 0; argument < javaConstructorParameterCount; ++argument) { + arguments.get(argument).write(writer, globals); + } + + writer.invokeConstructor(type, Method.getMethod(binding.javaConstructor)); + writer.putField(CLASS_TYPE, name, type); + + writer.mark(nonNull); + writer.loadThis(); + writer.getField(CLASS_TYPE, name, type); + + for (int argument = 0; argument < binding.javaMethod.getParameterCount(); ++argument) { + arguments.get(argument + javaConstructorParameterCount).write(writer, globals); + } + + writer.invokeVirtual(type, Method.getMethod(binding.javaMethod)); + } else { + for (AExpression argument : arguments) { + argument.write(writer, globals); + } + + writer.invokeStatic(CLASS_TYPE, new Method(method.name, method.methodType.toMethodDescriptorString())); + } } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index 0f7445a38c4..8abd3c7185d 100644 --- 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -359,6 +359,13 @@ public final class SSource extends AStatement { clinit.endMethod(); } + // Write binding variables + for (Map.Entry> binding : globals.getBindings().entrySet()) { + String name = binding.getKey(); + String descriptor = Type.getType(binding.getValue()).getDescriptor(); + visitor.visitField(Opcodes.ACC_PRIVATE, name, descriptor, null, null).visitEnd(); + } + // Write any needsVarName methods for used variables for (org.objectweb.asm.commons.Method needsMethod : scriptClassInfo.getNeedsMethods()) { String name = needsMethod.getName(); diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt index a3ff479533b..444234384c6 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt @@ -132,24 +132,6 @@ class org.elasticsearch.index.mapper.IpFieldMapper$IpFieldType$IpScriptDocValues List getValues() } -# for testing. -# currently FeatureTest exposes overloaded constructor, field load store, and overloaded static methods -class org.elasticsearch.painless.FeatureTest no_import { - int z - () - (int,int) - int getX() - int getY() - void setX(int) - void setY(int) - boolean overloadedStatic() - boolean overloadedStatic(boolean) - Object twoFunctionsOfX(Function,Function) - void listInput(List) - int org.elasticsearch.painless.FeatureTestAugmentation getTotal() - int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int) -} - class org.elasticsearch.search.lookup.FieldLookup { def getValue() List getValues() @@ -174,4 +156,26 @@ class org.elasticsearch.index.similarity.ScriptedSimilarity$Term { class org.elasticsearch.index.similarity.ScriptedSimilarity$Doc { int getLength() float getFreq() +} + +# for testing +class org.elasticsearch.painless.FeatureTest no_import { + int z + () + (int,int) + int getX() + int getY() + void setX(int) + void setY(int) + boolean overloadedStatic() + boolean overloadedStatic(boolean) + Object twoFunctionsOfX(Function,Function) + void listInput(List) + int org.elasticsearch.painless.FeatureTestAugmentation getTotal() + int org.elasticsearch.painless.FeatureTestAugmentation addToTotal(int) +} + +# for testing +static { + int testAddWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingTest } \ No newline at end of file diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java new file mode 100644 index 00000000000..4bcc557d3dc --- /dev/null +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BindingsTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.painless; + +import org.elasticsearch.script.ExecutableScript; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class BindingsTests extends ScriptTestCase { + + public void testBasicBinding() { + assertEquals(15, exec("testAddWithState(4, 5, 6, 0.0)")); + } + + public void testRepeatedBinding() { + String script = "testAddWithState(4, 5, params.test, 0.0)"; + Map<String, Object> params = new HashMap<>(); + ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap()); + ExecutableScript executableScript = factory.newInstance(params); + + executableScript.setNextVar("test", 5); + assertEquals(14, executableScript.run()); + + executableScript.setNextVar("test", 4); + assertEquals(13, executableScript.run()); + + executableScript.setNextVar("test", 7); + assertEquals(16, executableScript.run()); + } + + public void testBoundBinding() { + String script = "testAddWithState(4, params.bound, params.test, 0.0)"; + Map<String, Object> params = new HashMap<>(); + ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap()); + ExecutableScript executableScript = factory.newInstance(params); + + executableScript.setNextVar("test", 5); + executableScript.setNextVar("bound", 1); + assertEquals(10, executableScript.run()); + + executableScript.setNextVar("test", 4); + executableScript.setNextVar("bound", 2); + assertEquals(9, executableScript.run()); + } +} diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 12ce5ce7d4a..e7c36ff506e 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -83,7 +83,6 @@ thirdPartyAudit.excludes = [ 'io.netty.internal.tcnative.SSLContext', // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) - 'org.bouncycastle.asn1.x500.X500Name', 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', @@ -163,3 +162,11 @@ thirdPartyAudit.excludes = [ 'org.conscrypt.Conscrypt', 'org.conscrypt.HandshakeListener' ] + +if (project.inFipsJvm == false) { + // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in + // a FIPS JVM with BouncyCastleFIPS Provider + thirdPartyAudit.excludes += [ + 'org.bouncycastle.asn1.x500.X500Name' + ] +} diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java index 4d264470785..48fa49b9a8a 100644 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java +++ b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPlugin.java @@ -19,39 +19,33 @@ package org.elasticsearch.discovery.file; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers;
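A condensed sketch of the behaviour this hunk leaves behind (a paraphrase of the change that follows, not a verbatim copy of the new method): the plugin stops wiring a hosts provider and instead logs a deprecation warning on each use.

    // condensed from the FileBasedDiscoveryPlugin hunk below
    @Override
    public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(TransportService transportService,
                                                                            NetworkService networkService) {
        deprecationLogger.deprecated(DEPRECATION_MESSAGE); // file-based discovery is now built into Elasticsearch
        return Collections.emptyMap();                     // the "file" provider is no longer registered here
    }
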
import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.TransportService; -import java.nio.file.Path; import java.util.Collections; import java.util.Map; import java.util.function.Supplier; -/** - * Plugin for providing file-based unicast hosts discovery. The list of unicast hosts - * is obtained by reading the {@link FileBasedUnicastHostsProvider#UNICAST_HOSTS_FILE} in - * the {@link Environment#configFile()}/discovery-file directory. - */ public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin { - private final Settings settings; - private final Path configPath; + private final DeprecationLogger deprecationLogger; + static final String DEPRECATION_MESSAGE + = "File-based discovery is now built into Elasticsearch and does not require the discovery-file plugin"; - public FileBasedDiscoveryPlugin(Settings settings, Path configPath) { - this.settings = settings; - this.configPath = configPath; + public FileBasedDiscoveryPlugin(Settings settings) { + deprecationLogger = new DeprecationLogger(Loggers.getLogger(this.getClass(), settings)); } @Override public Map> getZenHostsProviders(TransportService transportService, NetworkService networkService) { - return Collections.singletonMap( - "file", - () -> new FileBasedUnicastHostsProvider(new Environment(settings, configPath))); + deprecationLogger.deprecated(DEPRECATION_MESSAGE); + return Collections.emptyMap(); } } diff --git a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java b/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java deleted file mode 100644 index 584ae4de5a2..00000000000 --- a/plugins/discovery-file/src/main/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProvider.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.discovery.file; - -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.discovery.zen.UnicastHostsProvider; -import org.elasticsearch.env.Environment; - -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.NoSuchFileException; -import java.nio.file.Path; -import java.util.Collections; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * An implementation of {@link UnicastHostsProvider} that reads hosts/ports - * from {@link #UNICAST_HOSTS_FILE}. - * - * Each unicast host/port that is part of the discovery process must be listed on - * a separate line. If the port is left off an entry, a default port of 9300 is - * assumed. An example unicast hosts file could read: - * - * 67.81.244.10 - * 67.81.244.11:9305 - * 67.81.244.15:9400 - */ -class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { - - static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; - - private final Path unicastHostsFilePath; - - FileBasedUnicastHostsProvider(Environment environment) { - super(environment.settings()); - this.unicastHostsFilePath = environment.configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); - } - - @Override - public List buildDynamicHosts(HostsResolver hostsResolver) { - List hostsList; - try (Stream lines = Files.lines(unicastHostsFilePath)) { - hostsList = lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments - .collect(Collectors.toList()); - } catch (FileNotFoundException | NoSuchFileException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[discovery-file] Failed to find unicast hosts file [{}]", - unicastHostsFilePath), e); - hostsList = Collections.emptyList(); - } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[discovery-file] Error reading unicast hosts file [{}]", - unicastHostsFilePath), e); - hostsList = Collections.emptyList(); - } - - final List dynamicHosts = hostsResolver.resolveHosts(hostsList, 1); - logger.debug("[discovery-file] Using dynamic discovery nodes {}", dynamicHosts); - return dynamicHosts; - } - -} diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java new file mode 100644 index 00000000000..643c7b2c95c --- /dev/null +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedDiscoveryPluginDeprecationTests.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.file; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.discovery.file.FileBasedDiscoveryPlugin.DEPRECATION_MESSAGE; + +public class FileBasedDiscoveryPluginDeprecationTests extends ESTestCase { + public void testDeprecationWarning() { + new FileBasedDiscoveryPlugin(Settings.EMPTY).getZenHostsProviders(null, null); + assertWarnings(DEPRECATION_MESSAGE); + } +} diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 6cd55f682c8..f55104f2a96 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -2141,3 +2141,9 @@ if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) { 'javax.xml.bind.Unmarshaller' ] } + +if (project.inFipsJvm) { + // FIPS JVM includes many classes from bouncycastle which count as jar hell for the third party audit, + // rather than provide a long list of exclusions, disable the check on FIPS. + thirdPartyAudit.enabled = false +} diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 07605bfee29..cb8916b857c 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -62,7 +62,6 @@ thirdPartyAudit.excludes = [ 'io.netty.internal.tcnative.SSLContext', // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) - 'org.bouncycastle.asn1.x500.X500Name', 'org.bouncycastle.cert.X509v3CertificateBuilder', 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', @@ -141,4 +140,11 @@ thirdPartyAudit.excludes = [ 'org.conscrypt.BufferAllocator', 'org.conscrypt.Conscrypt', 'org.conscrypt.HandshakeListener' -] \ No newline at end of file +] +if (project.inFipsJvm == false) { + // BouncyCastleFIPS provides this class, so the exclusion is invalid when running CI in + // a FIPS JVM with BouncyCastleFIPS Provider + thirdPartyAudit.excludes += [ + 'org.bouncycastle.asn1.x500.X500Name' + ] +} \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 58efce77c9f..b3ec72d5270 100- --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -131,9 +131,7 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shrink.ResizeAction; -import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; import org.elasticsearch.action.admin.indices.shrink.TransportResizeAction; -import org.elasticsearch.action.admin.indices.shrink.TransportShrinkAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import
org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; @@ -446,7 +444,6 @@ public class ActionModule extends AbstractModule { actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); - actions.register(ShrinkAction.INSTANCE, TransportShrinkAction.class); actions.register(ResizeAction.INSTANCE, TransportResizeAction.class); actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class); actions.register(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index f2e07e29bad..25f7f33647c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -55,8 +55,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private final Set aliases = new HashSet<>(); - private final Map customs = new HashMap<>(); - private final Set blocks = new HashSet<>(); private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; @@ -83,11 +81,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ return this; } - public CreateIndexClusterStateUpdateRequest customs(Map customs) { - this.customs.putAll(customs); - return this; - } - public CreateIndexClusterStateUpdateRequest blocks(Set blocks) { this.blocks.addAll(blocks); return this; @@ -146,10 +139,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ return aliases; } - public Map customs() { - return customs; - } - public Set blocks() { return blocks; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 875d17eb54b..a186f9b5011 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -58,9 +57,9 @@ import java.util.Objects; import java.util.Set; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * A request to create an index. 
Best created with {@link org.elasticsearch.client.Requests#createIndexRequest(String)}. @@ -87,8 +86,6 @@ public class CreateIndexRequest extends AcknowledgedRequest private final Set aliases = new HashSet<>(); - private final Map customs = new HashMap<>(); - private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; public CreateIndexRequest() { @@ -388,18 +385,7 @@ public class CreateIndexRequest extends AcknowledgedRequest } else if (ALIASES.match(name, deprecationHandler)) { aliases((Map) entry.getValue()); } else { - // maybe custom? - IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); - if (proto != null) { - try { - customs.put(name, proto.fromMap((Map) entry.getValue())); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse custom metadata for [{}]", name); - } - } else { - // found a key which is neither custom defined nor one of the supported ones - throw new ElasticsearchParseException("unknown key [{}] for create index", name); - } + throw new ElasticsearchParseException("unknown key [{}] for create index", name); } } return this; @@ -413,18 +399,6 @@ public class CreateIndexRequest extends AcknowledgedRequest return this.aliases; } - /** - * Adds custom metadata to the index to be created. - */ - public CreateIndexRequest custom(IndexMetaData.Custom custom) { - customs.put(custom.type(), custom); - return this; - } - - public Map customs() { - return this.customs; - } - public ActiveShardCount waitForActiveShards() { return waitForActiveShards; } @@ -474,11 +448,13 @@ public class CreateIndexRequest extends AcknowledgedRequest } mappings.put(type, source); } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); - customs.put(type, customIndexMetaData); + if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + // This used to be the size of custom metadata classes + int customSize = in.readVInt(); + assert customSize == 0 : "unexpected custom metadata when none is supported"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int aliasesSize = in.readVInt(); for (int i = 0; i < aliasesSize; i++) { @@ -501,10 +477,9 @@ public class CreateIndexRequest extends AcknowledgedRequest out.writeString(entry.getKey()); out.writeString(entry.getValue()); } - out.writeVInt(customs.size()); - for (Map.Entry entry : customs.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + // Size of custom index metadata, which is removed + out.writeVInt(0); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { @@ -542,10 +517,6 @@ public class CreateIndexRequest extends AcknowledgedRequest alias.toXContent(builder, params); } builder.endObject(); - - for (Map.Entry entry : customs.entrySet()) { - builder.field(entry.getKey(), entry.getValue(), params); - } return builder; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index cc8fb2c32c3..d2593e7e94b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -23,7 
+23,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -224,14 +223,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 7195fd78154..5459805416e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -185,7 +185,6 @@ public class TransportResizeAction extends TransportMasterNodeAction aliases = new HashSet<>(); - private Map customs = new HashMap<>(); - private Integer version; public PutIndexTemplateRequest() { @@ -353,15 +350,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest) entry.getValue()); } else { - // maybe custom? - IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); - if (proto != null) { - try { - customs.put(name, proto.fromMap((Map) entry.getValue())); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse custom metadata for [{}]", name); - } - } + throw new ElasticsearchParseException("unknown key [{}] in the template ", name); } } return this; @@ -395,15 +384,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest customs() { - return this.customs; - } - public Set aliases() { return this.aliases; } @@ -494,11 +474,13 @@ public class PutIndexTemplateRequest extends MasterNodeRequest 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int aliasesSize = in.readVInt(); for (int i = 0; i < aliasesSize; i++) { @@ -525,10 +507,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest entry : customs.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeVInt(0); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { @@ -565,10 +545,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest entry : customs.entrySet()) { - builder.field(entry.getKey(), entry.getValue(), params); - } - return builder; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index bd8621a1a7d..34eccbf9d8a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -84,7 +84,6 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction

> responseMap, List indexResponses) { - this.responseMap = responseMap; - this.indexResponses = indexResponses; + this.responseMap = Objects.requireNonNull(responseMap); + this.indexResponses = Objects.requireNonNull(indexResponses); } /** * Used for serialization */ FieldCapabilitiesResponse() { - this.responseMap = Collections.emptyMap(); + this(Collections.emptyMap(), Collections.emptyList()); } /** @@ -81,6 +82,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont List getIndexResponses() { return indexResponses; } + /** * * Get the field capabilities per type for the provided {@code field}. diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index ef0d19a2655..b8d1f477ac1 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -90,7 +90,7 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction innerListener = new ActionListener() { @Override diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index fecee5f265f..7514a41f575 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -171,9 +171,11 @@ public class SimulatePipelineRequest extends ActionRequest implements ToXContent return new Parsed(pipeline, ingestDocumentList, verbose); } - static Parsed parse(Map config, boolean verbose, IngestService pipelineStore) throws Exception { + static Parsed parse(Map config, boolean verbose, IngestService ingestService) throws Exception { Map pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE); - Pipeline pipeline = Pipeline.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactories()); + Pipeline pipeline = Pipeline.create( + SIMULATED_PIPELINE_ID, pipelineConfig, ingestService.getProcessorFactories(), ingestService.getScriptService() + ); List ingestDocumentList = parseDocs(config); return new Parsed(pipeline, ingestDocumentList, verbose); } diff --git a/server/src/main/java/org/elasticsearch/client/Client.java b/server/src/main/java/org/elasticsearch/client/Client.java index adb2f509b99..f97f618347a 100644 --- a/server/src/main/java/org/elasticsearch/client/Client.java +++ b/server/src/main/java/org/elasticsearch/client/Client.java @@ -455,7 +455,7 @@ public interface Client extends ElasticsearchClient, Releasable { /** * Builder for the field capabilities request. */ - FieldCapabilitiesRequestBuilder prepareFieldCaps(); + FieldCapabilitiesRequestBuilder prepareFieldCaps(String... 
indices); /** * An action that returns the field capabilities from the provided request diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 86d9d2c445f..553c92e6de8 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -651,8 +651,8 @@ public abstract class AbstractClient extends AbstractComponent implements Client } @Override - public FieldCapabilitiesRequestBuilder prepareFieldCaps() { - return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE); + public FieldCapabilitiesRequestBuilder prepareFieldCaps(String... indices) { + return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE, indices); } static class Admin implements AdminClient { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java new file mode 100644 index 00000000000..4aa429f5704 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java @@ -0,0 +1,188 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * This is a {@code Map} that implements AbstractDiffable so it + * can be used for cluster state purposes + */ +public class DiffableStringMap extends AbstractMap implements Diffable { + + private final Map innerMap; + + DiffableStringMap(final Map map) { + this.innerMap = map; + } + + @SuppressWarnings("unchecked") + DiffableStringMap(final StreamInput in) throws IOException { + this.innerMap = (Map) (Map) in.readMap(); + } + + @Override + public String put(String key, String value) { + return innerMap.put(key, value); + } + + @Override + public Set> entrySet() { + return innerMap.entrySet(); + } + + @Override + @SuppressWarnings("unchecked") + public void writeTo(StreamOutput out) throws IOException { + out.writeMap((Map) (Map) innerMap); + } + + @Override + public Diff diff(DiffableStringMap previousState) { + return new DiffableStringMapDiff(previousState, this); + } + + public static Diff readDiffFrom(StreamInput in) throws IOException { + return new DiffableStringMapDiff(in); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj instanceof DiffableStringMap) { + DiffableStringMap other = (DiffableStringMap) obj; + return innerMap.equals(other.innerMap); + } else if (obj instanceof Map) { + Map other = (Map) obj; + return innerMap.equals(other); + } else { + return false; + } + } + + @Override + public int hashCode() { + return innerMap.hashCode(); + } + + @Override + public String toString() { + return "DiffableStringMap[" + innerMap.toString() + "]"; + } + + /** + * Represents differences between two DiffableStringMaps. 
+ */ + public static class DiffableStringMapDiff implements Diff { + + private final List deletes; + private final Map upserts; // diffs also become upserts + + private DiffableStringMapDiff(DiffableStringMap before, DiffableStringMap after) { + final List tempDeletes = new ArrayList<>(); + final Map tempUpserts = new HashMap<>(); + for (String key : before.keySet()) { + if (after.containsKey(key) == false) { + tempDeletes.add(key); + } + } + + for (Map.Entry partIter : after.entrySet()) { + String beforePart = before.get(partIter.getKey()); + if (beforePart == null) { + tempUpserts.put(partIter.getKey(), partIter.getValue()); + } else if (partIter.getValue().equals(beforePart) == false) { + tempUpserts.put(partIter.getKey(), partIter.getValue()); + } + } + deletes = tempDeletes; + upserts = tempUpserts; + } + + private DiffableStringMapDiff(StreamInput in) throws IOException { + deletes = new ArrayList<>(); + upserts = new HashMap<>(); + int deletesCount = in.readVInt(); + for (int i = 0; i < deletesCount; i++) { + deletes.add(in.readString()); + } + int upsertsCount = in.readVInt(); + for (int i = 0; i < upsertsCount; i++) { + String key = in.readString(); + String newValue = in.readString(); + upserts.put(key, newValue); + } + } + + public List getDeletes() { + return deletes; + } + + public Map> getDiffs() { + return Collections.emptyMap(); + } + + public Map getUpserts() { + return upserts; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(deletes.size()); + for (String delete : deletes) { + out.writeString(delete); + } + out.writeVInt(upserts.size()); + for (Map.Entry entry : upserts.entrySet()) { + out.writeString(entry.getKey()); + out.writeString(entry.getValue()); + } + } + + @Override + public DiffableStringMap apply(DiffableStringMap part) { + Map builder = new HashMap<>(part.innerMap); + List deletes = getDeletes(); + for (String delete : deletes) { + builder.remove(delete); + } + assert getDiffs().size() == 0 : "there should never be diffs for DiffableStringMap"; + + for (Map.Entry upsert : upserts.entrySet()) { + builder.put(upsert.getKey(), upsert.getValue()); + } + return new DiffableStringMap(builder); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 11c489f63ab..e3af709ec5f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.LongArrayList; import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.Assertions; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; @@ -65,7 +64,6 @@ import java.time.ZonedDateTime; import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Locale; @@ -81,59 +79,6 @@ import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; public class IndexMetaData implements Diffable, ToXContentFragment { - /** - * This class will be removed in v7.0 - */ - @Deprecated - public interface Custom extends Diffable, ToXContent { - - String type(); - - Custom fromMap(Map map) throws IOException; - - Custom 
fromXContent(XContentParser parser) throws IOException; - - /** - * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput - */ - Diff readDiffFrom(StreamInput in) throws IOException; - - /** - * Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged. - */ - Custom readFrom(StreamInput in) throws IOException; - - /** - * Merges from this to another, with this being more important, i.e., if something exists in this and another, - * this will prevail. - */ - Custom mergeWith(Custom another); - } - - public static Map customPrototypes = new HashMap<>(); - - /** - * Register a custom index meta data factory. Make sure to call it from a static block. - */ - public static void registerPrototype(String type, Custom proto) { - customPrototypes.put(type, proto); - } - - @Nullable - public static T lookupPrototype(String type) { - //noinspection unchecked - return (T) customPrototypes.get(type); - } - - public static T lookupPrototypeSafe(String type) { - //noinspection unchecked - T proto = (T) customPrototypes.get(type); - if (proto == null) { - throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); - } - return proto; - } - public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ)); public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)); @@ -324,7 +269,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen private final ImmutableOpenMap mappings; - private final ImmutableOpenMap customs; + private final ImmutableOpenMap customData; private final ImmutableOpenIntMap> inSyncAllocationIds; @@ -343,7 +288,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen private IndexMetaData(Index index, long version, long mappingVersion, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings, ImmutableOpenMap mappings, ImmutableOpenMap aliases, - ImmutableOpenMap customs, ImmutableOpenIntMap> inSyncAllocationIds, + ImmutableOpenMap customData, ImmutableOpenIntMap> inSyncAllocationIds, DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, Version indexCreatedVersion, Version indexUpgradedVersion, int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, ImmutableOpenMap rolloverInfos) { @@ -360,7 +305,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1); this.settings = settings; this.mappings = mappings; - this.customs = customs; + this.customData = customData; this.aliases = aliases; this.inSyncAllocationIds = inSyncAllocationIds; this.requireFilters = requireFilters; @@ -485,22 +430,14 @@ public class IndexMetaData implements Diffable, ToXContentFragmen return mappings.get(mappingType); } - // we keep the shrink settings for BWC - this can be removed in 8.0 - // we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 
6.0 - public static final String INDEX_SHRINK_SOURCE_UUID_KEY = "index.shrink.source.uuid"; - public static final String INDEX_SHRINK_SOURCE_NAME_KEY = "index.shrink.source.name"; public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid"; public static final String INDEX_RESIZE_SOURCE_NAME_KEY = "index.resize.source.name"; - public static final Setting INDEX_SHRINK_SOURCE_UUID = Setting.simpleString(INDEX_SHRINK_SOURCE_UUID_KEY); - public static final Setting INDEX_SHRINK_SOURCE_NAME = Setting.simpleString(INDEX_SHRINK_SOURCE_NAME_KEY); - public static final Setting INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY, - INDEX_SHRINK_SOURCE_UUID); - public static final Setting INDEX_RESIZE_SOURCE_NAME = Setting.simpleString(INDEX_RESIZE_SOURCE_NAME_KEY, - INDEX_SHRINK_SOURCE_NAME); + public static final Setting INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY); + public static final Setting INDEX_RESIZE_SOURCE_NAME = Setting.simpleString(INDEX_RESIZE_SOURCE_NAME_KEY); public Index getResizeSourceIndex() { - return INDEX_RESIZE_SOURCE_UUID.exists(settings) || INDEX_SHRINK_SOURCE_UUID.exists(settings) - ? new Index(INDEX_RESIZE_SOURCE_NAME.get(settings), INDEX_RESIZE_SOURCE_UUID.get(settings)) : null; + return INDEX_RESIZE_SOURCE_UUID.exists(settings) ? new Index(INDEX_RESIZE_SOURCE_NAME.get(settings), + INDEX_RESIZE_SOURCE_UUID.get(settings)) : null; } /** @@ -519,13 +456,12 @@ public class IndexMetaData implements Diffable, ToXContentFragmen return mappings.get(MapperService.DEFAULT_MAPPING); } - public ImmutableOpenMap getCustoms() { - return this.customs; + ImmutableOpenMap getCustomData() { + return this.customData; } - @SuppressWarnings("unchecked") - public T custom(String type) { - return (T) customs.get(type); + public Map getCustomData(final String key) { + return Collections.unmodifiableMap(this.customData.get(key)); } public ImmutableOpenIntMap> getInSyncAllocationIds() { @@ -591,7 +527,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen if (state != that.state) { return false; } - if (!customs.equals(that.customs)) { + if (!customData.equals(that.customData)) { return false; } if (routingNumShards != that.routingNumShards) { @@ -620,7 +556,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen result = 31 * result + aliases.hashCode(); result = 31 * result + settings.hashCode(); result = 31 * result + mappings.hashCode(); - result = 31 * result + customs.hashCode(); + result = 31 * result + customData.hashCode(); result = 31 * result + Long.hashCode(routingFactor); result = 31 * result + Long.hashCode(routingNumShards); result = 31 * result + Arrays.hashCode(primaryTerms); @@ -660,7 +596,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen private final Settings settings; private final Diff> mappings; private final Diff> aliases; - private final Diff> customs; + private final Diff> customData; private final Diff>> inSyncAllocationIds; private final Diff> rolloverInfos; @@ -674,7 +610,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen primaryTerms = after.primaryTerms; mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer()); aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer()); - customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer()); + customData = DiffableUtils.diff(before.customData, 
after.customData, DiffableUtils.getStringKeySerializer()); inSyncAllocationIds = DiffableUtils.diff(before.inSyncAllocationIds, after.inSyncAllocationIds, DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance()); rolloverInfos = DiffableUtils.diff(before.rolloverInfos, after.rolloverInfos, DiffableUtils.getStringKeySerializer()); @@ -696,18 +632,8 @@ public class IndexMetaData implements Diffable, ToXContentFragmen MappingMetaData::readDiffFrom); aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData::new, AliasMetaData::readDiffFrom); - customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), - new DiffableUtils.DiffableValueSerializer() { - @Override - public Custom read(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readFrom(in); - } - - @Override - public Diff readDiff(StreamInput in, String key) throws IOException { - return lookupPrototypeSafe(key).readDiffFrom(in); - } - }); + customData = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), DiffableStringMap::new, + DiffableStringMap::readDiffFrom); inSyncAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance()); if (in.getVersion().onOrAfter(Version.V_6_4_0)) { @@ -732,7 +658,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen out.writeVLongArray(primaryTerms); mappings.writeTo(out); aliases.writeTo(out); - customs.writeTo(out); + customData.writeTo(out); inSyncAllocationIds.writeTo(out); if (out.getVersion().onOrAfter(Version.V_6_4_0)) { rolloverInfos.writeTo(out); @@ -750,7 +676,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen builder.primaryTerms(primaryTerms); builder.mappings.putAll(mappings.apply(part.mappings)); builder.aliases.putAll(aliases.apply(part.aliases)); - builder.customs.putAll(customs.apply(part.customs)); + builder.customMetaData.putAll(customData.apply(part.customData)); builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds)); builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos)); return builder.build(); @@ -780,10 +706,17 @@ public class IndexMetaData implements Diffable, ToXContentFragmen builder.putAlias(aliasMd); } int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); + if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + for (int i = 0; i < customSize; i++) { + String key = in.readString(); + DiffableStringMap custom = new DiffableStringMap(in); + builder.putCustom(key, custom); + } + } else { + assert customSize == 0 : "expected no custom index metadata"; + if (customSize > 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } int inSyncAllocationIdsSize = in.readVInt(); for (int i = 0; i < inSyncAllocationIdsSize; i++) { @@ -819,10 +752,14 @@ public class IndexMetaData implements Diffable, ToXContentFragmen for (ObjectCursor cursor : aliases.values()) { cursor.value.writeTo(out); } - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeVInt(customData.size()); + for 
(final ObjectObjectCursor cursor : customData) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } else { + out.writeVInt(0); } out.writeVInt(inSyncAllocationIds.size()); for (IntObjectCursor> cursor : inSyncAllocationIds) { @@ -855,7 +792,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen private Settings settings = Settings.Builder.EMPTY_SETTINGS; private final ImmutableOpenMap.Builder mappings; private final ImmutableOpenMap.Builder aliases; - private final ImmutableOpenMap.Builder customs; + private final ImmutableOpenMap.Builder customMetaData; private final ImmutableOpenIntMap.Builder> inSyncAllocationIds; private final ImmutableOpenMap.Builder rolloverInfos; private Integer routingNumShards; @@ -864,7 +801,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen this.index = index; this.mappings = ImmutableOpenMap.builder(); this.aliases = ImmutableOpenMap.builder(); - this.customs = ImmutableOpenMap.builder(); + this.customMetaData = ImmutableOpenMap.builder(); this.inSyncAllocationIds = ImmutableOpenIntMap.builder(); this.rolloverInfos = ImmutableOpenMap.builder(); } @@ -878,7 +815,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen this.primaryTerms = indexMetaData.primaryTerms.clone(); this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings); this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases); - this.customs = ImmutableOpenMap.builder(indexMetaData.customs); + this.customMetaData = ImmutableOpenMap.builder(indexMetaData.customData); this.routingNumShards = indexMetaData.routingNumShards; this.inSyncAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.inSyncAllocationIds); this.rolloverInfos = ImmutableOpenMap.builder(indexMetaData.rolloverInfos); @@ -1008,8 +945,8 @@ public class IndexMetaData implements Diffable, ToXContentFragmen return this; } - public Builder putCustom(String type, Custom customIndexMetaData) { - this.customs.put(type, customIndexMetaData); + public Builder putCustom(String type, Map customIndexMetaData) { + this.customMetaData.put(type, new DiffableStringMap(customIndexMetaData)); return this; } @@ -1177,7 +1114,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); return new IndexMetaData(new Index(index, uuid), version, mappingVersion, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), - tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, + tmpAliases.build(), customMetaData.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, rolloverInfos.build()); } @@ -1205,10 +1142,9 @@ public class IndexMetaData implements Diffable, ToXContentFragmen } builder.endArray(); - for (ObjectObjectCursor cursor : indexMetaData.getCustoms()) { - builder.startObject(cursor.key); - cursor.value.toXContent(builder, params); - builder.endObject(); + for (ObjectObjectCursor cursor : indexMetaData.customData) { + builder.field(cursor.key); + builder.map(cursor.value); } builder.startObject(KEY_ALIASES); @@ -1317,15 +1253,8 @@ public class IndexMetaData implements Diffable, ToXContentFragmen assert Version.CURRENT.major <= 5; parser.skipChildren(); } else { - // check if its a custom index metadata - 
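Aside: the extraction stripped the generic type parameters throughout this hunk. For readability, the version-gated write path above, reassembled (this mirrors the diff, it adds no behavior):

    if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
        out.writeVInt(customData.size());
        for (final ObjectObjectCursor<String, DiffableStringMap> cursor : customData) {
            out.writeString(cursor.key);
            cursor.value.writeTo(out);
        }
    } else {
        out.writeVInt(0); // pre-7.0.0-alpha1 peers still expect a (now always empty) customs count
    }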
Custom proto = lookupPrototype(currentFieldName); - if (proto == null) { - //TODO warn - parser.skipChildren(); - } else { - Custom custom = proto.fromXContent(parser); - builder.putCustom(custom.type(), custom); - } + // assume it's custom index metadata + builder.putCustom(currentFieldName, parser.mapStrings()); } } else if (token == XContentParser.Token.START_ARRAY) { if (KEY_MAPPINGS.equals(currentFieldName)) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index d35a4baa1e6..c3f0f86e3e9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -20,7 +20,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; @@ -87,13 +87,10 @@ public class IndexTemplateMetaData extends AbstractDiffable aliases; - private final ImmutableOpenMap customs; - public IndexTemplateMetaData(String name, int order, Integer version, List patterns, Settings settings, ImmutableOpenMap mappings, - ImmutableOpenMap aliases, - ImmutableOpenMap customs) { + ImmutableOpenMap aliases) { if (patterns == null || patterns.isEmpty()) { throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns); } @@ -104,7 +101,6 @@ public class IndexTemplateMetaData extends AbstractDiffable customs() { - return this.customs; - } - - public ImmutableOpenMap getCustoms() { - return this.customs; - } - - @SuppressWarnings("unchecked") - public T custom(String type) { - return (T) customs.get(type); - } - public static Builder builder(String name) { return new Builder(name); } @@ -227,11 +210,13 @@ public class IndexTemplateMetaData extends AbstractDiffable 0) { + throw new IllegalStateException("unexpected custom metadata when none is supported"); + } } builder.version(in.readOptionalVInt()); return builder.build(); @@ -260,10 +245,8 @@ public class IndexTemplateMetaData extends AbstractDiffable cursor : aliases.values()) { cursor.value.writeTo(out); } - out.writeVInt(customs.size()); - for (ObjectObjectCursor cursor : customs) { - out.writeString(cursor.key); - cursor.value.writeTo(out); + if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + out.writeVInt(0); } out.writeOptionalVInt(version); } @@ -272,9 +255,6 @@ public class IndexTemplateMetaData extends AbstractDiffable VALID_FIELDS = Sets.newHashSet( "template", "order", "mappings", "settings", "index_patterns", "aliases", "version"); - static { - VALID_FIELDS.addAll(IndexMetaData.customPrototypes.keySet()); - } private String name; @@ -290,13 +270,10 @@ public class IndexTemplateMetaData extends AbstractDiffable aliases; - private final ImmutableOpenMap.Builder customs; - public Builder(String name) { this.name = name; mappings = ImmutableOpenMap.builder(); aliases = ImmutableOpenMap.builder(); - customs = ImmutableOpenMap.builder(); } public Builder(IndexTemplateMetaData indexTemplateMetaData) { @@ -308,7 +285,6 @@ public class IndexTemplateMetaData extends AbstractDiffable cursor : indexTemplateMetaData.customs()) { - builder.startObject(cursor.key); - cursor.value.toXContent(builder, params); - 
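On the parse side above, any unrecognized top-level object is now read verbatim as a string-to-string map. A standalone sketch of that call; the JSON literal is illustrative, not from the diff:

    try (XContentParser parser = JsonXContent.jsonXContent.createParser(
            NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, "{\"a\":\"b\"}")) {
        Map<String, String> custom = parser.mapStrings(); // -> {a=b}
        // corresponds to builder.putCustom(currentFieldName, parser.mapStrings()) above
    }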
builder.endObject(); - } - builder.startObject("aliases"); for (ObjectCursor cursor : indexTemplateMetaData.aliases().values()) { AliasMetaData.Builder.toXContent(cursor.value, builder, params); @@ -468,15 +423,7 @@ public class IndexTemplateMetaData extends AbstractDiffable templates = MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index()); - Map customs = new HashMap<>(); + Map> customs = new HashMap<>(); // add the request mapping Map> mappings = new HashMap<>(); @@ -300,10 +299,6 @@ public class MetaDataCreateIndexService extends AbstractComponent { mappings.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue())); } - for (Map.Entry entry : request.customs().entrySet()) { - customs.put(entry.getKey(), entry.getValue()); - } - final Index recoverFromIndex = request.recoverFrom(); if (recoverFromIndex == null) { @@ -320,18 +315,6 @@ public class MetaDataCreateIndexService extends AbstractComponent { MapperService.parseMapping(xContentRegistry, mappingString)); } } - // handle custom - for (ObjectObjectCursor cursor : template.customs()) { - String type = cursor.key; - IndexMetaData.Custom custom = cursor.value; - IndexMetaData.Custom existing = customs.get(type); - if (existing == null) { - customs.put(type, custom); - } else { - IndexMetaData.Custom merged = existing.mergeWith(custom); - customs.put(type, merged); - } - } //handle aliases for (ObjectObjectCursor cursor : template.aliases()) { AliasMetaData aliasMetaData = cursor.value; @@ -519,7 +502,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { indexMetaDataBuilder.putAlias(aliasMetaData); } - for (Map.Entry customEntry : customs.entrySet()) { + for (Map.Entry> customEntry : customs.entrySet()) { indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue()); } @@ -723,10 +706,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { .put(IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id", Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray())) // we only try once and then give up with a shrink index - .put("index.allocation.max_retries", 1) - // we add the legacy way of specifying it here for BWC. 
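As above, the type parameters in the MetaDataCreateIndexService hunks were lost in extraction; the intended declarations read (reassembled, no new behavior):

    Map<String, Map<String, String>> customs = new HashMap<>();
    // ... later, when building the index metadata:
    for (Map.Entry<String, Map<String, String>> customEntry : customs.entrySet()) {
        indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue());
    }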
We can remove this once it's backported to 6.x - .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), resizeSourceIndex.getName()) - .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID()); + .put("index.allocation.max_retries", 1); } else if (type == ResizeType.SPLIT) { validateSplitIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build()); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 507eaf412d5..1baeb2459f0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -179,9 +179,6 @@ public class MetaDataIndexTemplateService extends AbstractComponent { .indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build(); templateBuilder.putAlias(aliasMetaData); } - for (Map.Entry entry : request.customs.entrySet()) { - templateBuilder.putCustom(entry.getKey(), entry.getValue()); - } IndexTemplateMetaData template = templateBuilder.build(); MetaData.Builder builder = MetaData.builder(currentState.metaData()).put(template); @@ -339,7 +336,6 @@ public class MetaDataIndexTemplateService extends AbstractComponent { Settings settings = Settings.Builder.EMPTY_SETTINGS; Map mappings = new HashMap<>(); List aliases = new ArrayList<>(); - Map customs = new HashMap<>(); TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; @@ -378,11 +374,6 @@ public class MetaDataIndexTemplateService extends AbstractComponent { return this; } - public PutRequest customs(Map customs) { - this.customs.putAll(customs); - return this; - } - public PutRequest putMapping(String mappingType, String mappingSource) { mappings.put(mappingType, mappingSource); return this; diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 137378f509d..46e3867f7ae 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -202,8 +202,10 @@ public final class IndexScopedSettings extends AbstractScopedSettings { case IndexMetaData.SETTING_VERSION_UPGRADED: case IndexMetaData.SETTING_INDEX_PROVIDED_NAME: case MergePolicyConfig.INDEX_MERGE_ENABLED: - case IndexMetaData.INDEX_SHRINK_SOURCE_UUID_KEY: - case IndexMetaData.INDEX_SHRINK_SOURCE_NAME_KEY: + // we keep the shrink settings for BWC - this can be removed in 8.0 + // we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 6.0 + case "index.shrink.source.uuid": + case "index.shrink.source.name": case IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY: case IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY: return true; diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index e47fe7a7a70..f34798605d7 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.single.SingleNodeDiscovery; +import org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider; import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; @@ -40,6 +41,7 @@ import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -69,10 +71,11 @@ public class DiscoveryModule { public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, List plugins, - AllocationService allocationService) { + AllocationService allocationService, Path configFile) { final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); hostProviders.put("settings", () -> new SettingsBasedHostsProvider(settings, transportService)); + hostProviders.put("file", () -> new FileBasedUnicastHostsProvider(settings, configFile)); for (DiscoveryPlugin plugin : plugins) { plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> { if (hostProviders.put(entry.getKey(), entry.getValue()) != null) { diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java new file mode 100644 index 00000000000..f339ae43a70 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProvider.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.discovery.zen; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * An implementation of {@link UnicastHostsProvider} that reads hosts/ports + * from {@link #UNICAST_HOSTS_FILE}. + * + * Each unicast host/port that is part of the discovery process must be listed on + * a separate line. If the port is left off an entry, a default port of 9300 is + * assumed. 
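A usage note on the built-in "file" registration above: with the provider now in core it is selected through the standard zen hosts-provider setting rather than by installing the old discovery-file plugin. A minimal elasticsearch.yml line (setting name taken from the existing zen discovery surface, not from this hunk):

    discovery.zen.hosts_provider: file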
An example unicast hosts file could read: + * + * 67.81.244.10 + * 67.81.244.11:9305 + * 67.81.244.15:9400 + */ +public class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider { + + public static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt"; + + private final Path unicastHostsFilePath; + private final Path legacyUnicastHostsFilePath; + + public FileBasedUnicastHostsProvider(Settings settings, Path configFile) { + super(settings); + this.unicastHostsFilePath = configFile.resolve(UNICAST_HOSTS_FILE); + this.legacyUnicastHostsFilePath = configFile.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE); + } + + private List getHostsList() { + if (Files.exists(unicastHostsFilePath)) { + return readFileContents(unicastHostsFilePath); + } + + if (Files.exists(legacyUnicastHostsFilePath)) { + deprecationLogger.deprecated("Found dynamic hosts list at [{}] but this path is deprecated. This list should be at [{}] " + + "instead. Support for the deprecated path will be removed in future.", legacyUnicastHostsFilePath, unicastHostsFilePath); + return readFileContents(legacyUnicastHostsFilePath); + } + + logger.warn("expected, but did not find, a dynamic hosts list at [{}]", unicastHostsFilePath); + + return Collections.emptyList(); + } + + private List readFileContents(Path path) { + try (Stream lines = Files.lines(path)) { + return lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments + .collect(Collectors.toList()); + } catch (IOException e) { + logger.warn(() -> new ParameterizedMessage("failed to read file [{}]", unicastHostsFilePath), e); + return Collections.emptyList(); + } + } + + @Override + public List buildDynamicHosts(HostsResolver hostsResolver) { + final List transportAddresses = hostsResolver.resolveHosts(getHostsList(), 1); + logger.debug("seed addresses: {}", transportAddresses); + return transportAddresses; + } +} diff --git a/server/src/main/java/org/elasticsearch/ingest/ConditionalProcessor.java b/server/src/main/java/org/elasticsearch/ingest/ConditionalProcessor.java new file mode 100644 index 00000000000..d1eb651acae --- /dev/null +++ b/server/src/main/java/org/elasticsearch/ingest/ConditionalProcessor.java @@ -0,0 +1,381 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.ingest; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import org.elasticsearch.script.IngestConditionalScript; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; + +public class ConditionalProcessor extends AbstractProcessor { + + static final String TYPE = "conditional"; + + private final Script condition; + + private final ScriptService scriptService; + + private final Processor processor; + + ConditionalProcessor(String tag, Script script, ScriptService scriptService, Processor processor) { + super(tag); + this.condition = script; + this.scriptService = scriptService; + this.processor = processor; + } + + @Override + public void execute(IngestDocument ingestDocument) throws Exception { + IngestConditionalScript script = + scriptService.compile(condition, IngestConditionalScript.CONTEXT).newInstance(condition.getParams()); + if (script.execute(new UnmodifiableIngestData(ingestDocument.getSourceAndMetadata()))) { + processor.execute(ingestDocument); + } + } + + @Override + public String getType() { + return TYPE; + } + + private static Object wrapUnmodifiable(Object raw) { + // Wraps all mutable types that the JSON parser can create by immutable wrappers. + // Any inputs not wrapped are assumed to be immutable + if (raw instanceof Map) { + return new UnmodifiableIngestData((Map) raw); + } else if (raw instanceof List) { + return new UnmodifiableIngestList((List) raw); + } else if (raw instanceof byte[]) { + return ((byte[]) raw).clone(); + } + return raw; + } + + private static UnsupportedOperationException unmodifiableException() { + return new UnsupportedOperationException("Mutating ingest documents in conditionals is not supported"); + } + + private static final class UnmodifiableIngestData implements Map { + + private final Map data; + + UnmodifiableIngestData(Map data) { + this.data = data; + } + + @Override + public int size() { + return data.size(); + } + + @Override + public boolean isEmpty() { + return data.isEmpty(); + } + + @Override + public boolean containsKey(final Object key) { + return data.containsKey(key); + } + + @Override + public boolean containsValue(final Object value) { + return data.containsValue(value); + } + + @Override + public Object get(final Object key) { + return wrapUnmodifiable(data.get(key)); + } + + @Override + public Object put(final String key, final Object value) { + throw unmodifiableException(); + } + + @Override + public Object remove(final Object key) { + throw unmodifiableException(); + } + + @Override + public void putAll(final Map m) { + throw unmodifiableException(); + } + + @Override + public void clear() { + throw unmodifiableException(); + } + + @Override + public Set keySet() { + return Collections.unmodifiableSet(data.keySet()); + } + + @Override + public Collection values() { + return new UnmodifiableIngestList(new ArrayList<>(data.values())); + } + + @Override + public Set> entrySet() { + return data.entrySet().stream().map(entry -> + new Entry() { + @Override + public String getKey() { + return entry.getKey(); + } + + @Override + public Object getValue() { + return wrapUnmodifiable(entry.getValue()); + } + + @Override + public Object setValue(final Object value) { + throw unmodifiableException(); + } + + @Override + public 
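+    // Note on the wrappers above: wrapUnmodifiable/UnmodifiableIngestData give a
+    // conditional script a read-only, deeply guarded view of the document. Container
+    // values are re-wrapped on every read, byte[] is defensively cloned, and every
+    // mutator throws UnsupportedOperationException via unmodifiableException().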
boolean equals(final Object o) { + return entry.equals(o); + } + + @Override + public int hashCode() { + return entry.hashCode(); + } + }).collect(Collectors.toSet()); + } + } + + private static final class UnmodifiableIngestList implements List { + + private final List data; + + UnmodifiableIngestList(List data) { + this.data = data; + } + + @Override + public int size() { + return data.size(); + } + + @Override + public boolean isEmpty() { + return data.isEmpty(); + } + + @Override + public boolean contains(final Object o) { + return data.contains(o); + } + + @Override + public Iterator iterator() { + Iterator wrapped = data.iterator(); + return new Iterator() { + @Override + public boolean hasNext() { + return wrapped.hasNext(); + } + + @Override + public Object next() { + return wrapped.next(); + } + + @Override + public void remove() { + throw unmodifiableException(); + } + }; + } + + @Override + public Object[] toArray() { + Object[] wrapped = data.toArray(new Object[0]); + for (int i = 0; i < wrapped.length; i++) { + wrapped[i] = wrapUnmodifiable(wrapped[i]); + } + return wrapped; + } + + @Override + public T[] toArray(final T[] a) { + Object[] raw = data.toArray(new Object[0]); + T[] wrapped = (T[]) Arrays.copyOf(raw, a.length, a.getClass()); + for (int i = 0; i < wrapped.length; i++) { + wrapped[i] = (T) wrapUnmodifiable(wrapped[i]); + } + return wrapped; + } + + @Override + public boolean add(final Object o) { + throw unmodifiableException(); + } + + @Override + public boolean remove(final Object o) { + throw unmodifiableException(); + } + + @Override + public boolean containsAll(final Collection c) { + return data.contains(c); + } + + @Override + public boolean addAll(final Collection c) { + throw unmodifiableException(); + } + + @Override + public boolean addAll(final int index, final Collection c) { + throw unmodifiableException(); + } + + @Override + public boolean removeAll(final Collection c) { + throw unmodifiableException(); + } + + @Override + public boolean retainAll(final Collection c) { + throw unmodifiableException(); + } + + @Override + public void clear() { + throw unmodifiableException(); + } + + @Override + public Object get(final int index) { + return wrapUnmodifiable(data.get(index)); + } + + @Override + public Object set(final int index, final Object element) { + throw unmodifiableException(); + } + + @Override + public void add(final int index, final Object element) { + throw unmodifiableException(); + } + + @Override + public Object remove(final int index) { + throw unmodifiableException(); + } + + @Override + public int indexOf(final Object o) { + return data.indexOf(o); + } + + @Override + public int lastIndexOf(final Object o) { + return data.lastIndexOf(o); + } + + @Override + public ListIterator listIterator() { + return new UnmodifiableListIterator(data.listIterator()); + } + + @Override + public ListIterator listIterator(final int index) { + return new UnmodifiableListIterator(data.listIterator(index)); + } + + @Override + public List subList(final int fromIndex, final int toIndex) { + return new UnmodifiableIngestList(data.subList(fromIndex, toIndex)); + } + + private static final class UnmodifiableListIterator implements ListIterator { + + private final ListIterator data; + + UnmodifiableListIterator(ListIterator data) { + this.data = data; + } + + @Override + public boolean hasNext() { + return data.hasNext(); + } + + @Override + public Object next() { + return wrapUnmodifiable(data.next()); + } + + @Override + public boolean hasPrevious() { + 
return data.hasPrevious(); + } + + @Override + public Object previous() { + return wrapUnmodifiable(data.previous()); + } + + @Override + public int nextIndex() { + return data.nextIndex(); + } + + @Override + public int previousIndex() { + return data.previousIndex(); + } + + @Override + public void remove() { + throw unmodifiableException(); + } + + @Override + public void set(final Object o) { + throw unmodifiableException(); + } + + @Override + public void add(final Object o) { + throw unmodifiableException(); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java index 54d06d11655..d4f27f47eb8 100644 --- a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java @@ -19,9 +19,18 @@ package org.elasticsearch.ingest; +import java.io.IOException; +import java.io.InputStream; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; @@ -296,6 +305,7 @@ public final class ConfigurationUtils { } public static List readProcessorConfigs(List> processorConfigs, + ScriptService scriptService, Map processorFactories) throws Exception { Exception exception = null; List processors = new ArrayList<>(); @@ -303,7 +313,7 @@ public final class ConfigurationUtils { for (Map processorConfigWithKey : processorConfigs) { for (Map.Entry entry : processorConfigWithKey.entrySet()) { try { - processors.add(readProcessor(processorFactories, entry.getKey(), entry.getValue())); + processors.add(readProcessor(processorFactories, scriptService, entry.getKey(), entry.getValue())); } catch (Exception e) { exception = ExceptionsHelper.useOrSuppress(exception, e); } @@ -356,13 +366,14 @@ public final class ConfigurationUtils { @SuppressWarnings("unchecked") public static Processor readProcessor(Map processorFactories, + ScriptService scriptService, String type, Object config) throws Exception { if (config instanceof Map) { - return readProcessor(processorFactories, type, (Map) config); + return readProcessor(processorFactories, scriptService, type, (Map) config); } else if (config instanceof String && "script".equals(type)) { Map normalizedScript = new HashMap<>(1); normalizedScript.put(ScriptType.INLINE.getParseField().getPreferredName(), config); - return readProcessor(processorFactories, type, normalizedScript); + return readProcessor(processorFactories, scriptService, type, normalizedScript); } else { throw newConfigurationException(type, null, null, "property isn't a map, but of type [" + config.getClass().getName() + "]"); @@ -370,15 +381,17 @@ public final class ConfigurationUtils { } public static Processor readProcessor(Map processorFactories, + ScriptService scriptService, String type, Map config) throws Exception { String tag = ConfigurationUtils.readOptionalStringProperty(null, 
null, config, TAG_KEY); + Script conditionalScript = extractConditional(config); Processor.Factory factory = processorFactories.get(type); if (factory != null) { boolean ignoreFailure = ConfigurationUtils.readBooleanProperty(null, null, config, "ignore_failure", false); List> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, Pipeline.ON_FAILURE_KEY); - List onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorFactories); + List onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, scriptService, processorFactories); if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) { throw newConfigurationException(type, tag, Pipeline.ON_FAILURE_KEY, @@ -392,14 +405,42 @@ public final class ConfigurationUtils { type, Arrays.toString(config.keySet().toArray())); } if (onFailureProcessors.size() > 0 || ignoreFailure) { - return new CompoundProcessor(ignoreFailure, Collections.singletonList(processor), onFailureProcessors); - } else { - return processor; + processor = new CompoundProcessor(ignoreFailure, Collections.singletonList(processor), onFailureProcessors); } + if (conditionalScript != null) { + processor = new ConditionalProcessor(tag, conditionalScript, scriptService, processor); + } + return processor; } catch (Exception e) { throw newConfigurationException(type, tag, null, e); } } throw newConfigurationException(type, tag, null, "No processor type exists with name [" + type + "]"); } + + private static Script extractConditional(Map config) throws IOException { + Object scriptSource = config.remove("if"); + if (scriptSource != null) { + try (XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent) + .map(normalizeScript(scriptSource)); + InputStream stream = BytesReference.bytes(builder).streamInput(); + XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, stream)) { + return Script.parse(parser); + } + } + return null; + } + + @SuppressWarnings("unchecked") + private static Map normalizeScript(Object scriptConfig) { + if (scriptConfig instanceof Map) { + return (Map) scriptConfig; + } else if (scriptConfig instanceof String) { + return Collections.singletonMap("source", scriptConfig); + } else { + throw newConfigurationException("conditional", null, "script", + "property isn't a map or string, but of type [" + scriptConfig.getClass().getName() + "]"); + } + } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index eee14e95869..f0f5d76caab 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -71,6 +71,7 @@ public class IngestService implements ClusterStateApplier { public static final String NOOP_PIPELINE_NAME = "_none"; private final ClusterService clusterService; + private final ScriptService scriptService; private final Map processorFactories; // Ideally this should be in IngestMetadata class, but we don't have the processor factories around there. // We know of all the processor factories when a node with all its plugin have been initialized. 
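To make the extractConditional hook above concrete: a processor definition may now carry an "if" key whose value is parsed as a script, either as a bare source string or as a full script object. A hypothetical pipeline body (the Painless condition and the use of the set processor are illustrative, not from the diff):

    String pipelineSource =
          "{ \"processors\": [ { \"set\": {"
        + "    \"if\": \"ctx.containsKey('foo')\","
        + "    \"field\": \"found\", \"value\": true } } ] }";
    // the bare string form is normalized to {"source": "..."} by normalizeScript()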
Also some @@ -85,6 +86,7 @@ public class IngestService implements ClusterStateApplier { Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry, List ingestPlugins) { this.clusterService = clusterService; + this.scriptService = scriptService; this.processorFactories = processorFactories( ingestPlugins, new Processor.Parameters( @@ -116,6 +118,10 @@ public class IngestService implements ClusterStateApplier { return clusterService; } + public ScriptService getScriptService() { + return scriptService; + } + /** * Deletes the pipeline specified by id in the request. */ @@ -300,11 +306,12 @@ public class IngestService implements ClusterStateApplier { } Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); - Pipeline pipeline = Pipeline.create(request.getId(), pipelineConfig, processorFactories); + Pipeline pipeline = Pipeline.create(request.getId(), pipelineConfig, processorFactories, scriptService); List exceptions = new ArrayList<>(); for (Processor processor : pipeline.flattenAllProcessors()) { for (Map.Entry entry : ingestInfos.entrySet()) { - if (entry.getValue().containsProcessor(processor.getType()) == false) { + String type = processor.getType(); + if (entry.getValue().containsProcessor(type) == false && ConditionalProcessor.TYPE.equals(type) == false) { String message = "Processor type [" + processor.getType() + "] is not installed on node [" + entry.getKey() + "]"; exceptions.add( ConfigurationUtils.newConfigurationException(processor.getType(), processor.getTag(), null, message) @@ -452,7 +459,10 @@ public class IngestService implements ClusterStateApplier { List exceptions = new ArrayList<>(); for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) { try { - pipelines.put(pipeline.getId(), Pipeline.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories)); + pipelines.put( + pipeline.getId(), + Pipeline.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories, scriptService) + ); } catch (ElasticsearchParseException e) { pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e)); exceptions.add(e); diff --git a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java index 37dd3f52cb7..0a8f9fbc0d8 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java +++ b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java @@ -26,6 +26,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import org.elasticsearch.script.ScriptService; /** * A pipeline is a list of {@link Processor} instances grouped under a unique id. 
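The Pipeline.create() signature change in the next hunk threads the ScriptService through parsing so nested "if" conditions can be compiled; call sites now look like this (assembled from the IngestService hunk above, with request/processorFactories/scriptService as in that context):

    Map<String, Object> pipelineConfig =
        XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2();
    Pipeline pipeline = Pipeline.create(request.getId(), pipelineConfig, processorFactories, scriptService);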
@@ -52,14 +53,15 @@ public final class Pipeline { } public static Pipeline create(String id, Map config, - Map processorFactories) throws Exception { + Map processorFactories, ScriptService scriptService) throws Exception { String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); List> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY); - List processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, processorFactories); + List processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, scriptService, processorFactories); List> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY); - List onFailureProcessors = ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, processorFactories); + List onFailureProcessors = + ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, scriptService, processorFactories); if (config.isEmpty() == false) { throw new ElasticsearchParseException("pipeline [" + id + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray())); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 9dfd8d2a382..7c0513f9eeb 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -471,7 +471,7 @@ public class Node implements Closeable { final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry, networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(), clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class), - clusterModule.getAllocationService()); + clusterModule.getAllocationService(), environment.configFile()); this.nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(), transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(), httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter(), responseCollectorService, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index b452b62eb5e..746bb643bf6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -87,13 +87,19 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { private XContentBuilder renderResponse(ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException { - return - new ClusterGetSettingsResponse( - state.metaData().persistentSettings(), - state.metaData().transientSettings(), - renderDefaults ? 
- settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), this.settings)) : - Settings.EMPTY - ).toXContent(builder, params); + return response(state, renderDefaults, settingsFilter, clusterSettings, settings).toXContent(builder, params); } + + static ClusterGetSettingsResponse response( + final ClusterState state, + final boolean renderDefaults, + final SettingsFilter settingsFilter, + final ClusterSettings clusterSettings, + final Settings settings) { + return new ClusterGetSettingsResponse( + settingsFilter.filter(state.metaData().persistentSettings()), + settingsFilter.filter(state.metaData().transientSettings()), + renderDefaults ? settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), settings)) : Settings.EMPTY); + } + } diff --git a/server/src/main/java/org/elasticsearch/script/IngestConditionalScript.java b/server/src/main/java/org/elasticsearch/script/IngestConditionalScript.java new file mode 100644 index 00000000000..27ce29b95dc --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/IngestConditionalScript.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script; + +import java.util.Map; + +/** + * A script used by {@link org.elasticsearch.ingest.ConditionalProcessor}. + */ +public abstract class IngestConditionalScript { + + public static final String[] PARAMETERS = { "ctx" }; + + /** The context used to compile {@link IngestConditionalScript} factories. */ + public static final ScriptContext CONTEXT = new ScriptContext<>("processor_conditional", Factory.class); + + /** The generic runtime parameters for the script. */ + private final Map params; + + public IngestConditionalScript(Map params) { + this.params = params; + } + + /** Return the parameters for this script. 
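Since Factory below has a single method, a test double for the new script context can be written as a lambda; the condition here is illustrative and not part of the diff:

    IngestConditionalScript.Factory factory = params -> new IngestConditionalScript(params) {
        @Override
        public boolean execute(Map<String, Object> ctx) {
            return "bar".equals(ctx.get("foo"));
        }
    };
    boolean matched = factory.newInstance(Collections.emptyMap())
        .execute(Collections.singletonMap("foo", "bar")); // -> true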
*/ + public Map getParams() { + return params; + } + + public abstract boolean execute(Map ctx); + + public interface Factory { + IngestConditionalScript newInstance(Map params); + } +} diff --git a/server/src/main/java/org/elasticsearch/script/ScriptModule.java b/server/src/main/java/org/elasticsearch/script/ScriptModule.java index f04e690fa42..1788d8c792b 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -51,6 +51,7 @@ public class ScriptModule { BucketAggregationSelectorScript.CONTEXT, SignificantTermsHeuristicScoreScript.CONTEXT, IngestScript.CONTEXT, + IngestConditionalScript.CONTEXT, FilterScript.CONTEXT, SimilarityScript.CONTEXT, SimilarityWeightScript.CONTEXT, diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java index 2da74c56f6a..a7f333abfa2 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhase.java @@ -57,6 +57,7 @@ public final class FetchSourceSubPhase implements FetchSubPhase { if (nestedHit) { value = getNestedSource((Map) value, hitContext); } + try { final int initialCapacity = nestedHit ? 1024 : Math.min(1024, source.internalSourceRef().length()); BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity); @@ -81,6 +82,9 @@ public final class FetchSourceSubPhase implements FetchSubPhase { private Map getNestedSource(Map sourceAsMap, HitContext hitContext) { for (SearchHit.NestedIdentity o = hitContext.hit().getNestedIdentity(); o != null; o = o.getChild()) { sourceAsMap = (Map) sourceAsMap.get(o.getField().string()); + if (sourceAsMap == null) { + return null; + } } return sourceAsMap; } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java index b3824063242..90b730660dd 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java @@ -28,11 +28,15 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.AbstractStreamableXContentTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.Predicate; +import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength; + public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTestCase { @@ -48,22 +52,46 @@ public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTe @Override protected FieldCapabilitiesResponse createTestInstance() { - Map> responses = new HashMap<>(); + if (randomBoolean()) { + // merged responses + Map> responses = new HashMap<>(); + + String[] fields = generateRandomStringArray(5, 10, false, true); + assertNotNull(fields); + + for (String field : fields) { + Map typesToCapabilities = new HashMap<>(); + String[] types = generateRandomStringArray(5, 10, false, false); + assertNotNull(types); + + for (String type : types) { + typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field)); + } + responses.put(field, 
typesToCapabilities); + } + return new FieldCapabilitiesResponse(responses); + } else { + // non-merged responses + List responses = new ArrayList<>(); + int numResponse = randomIntBetween(0, 10); + for (int i = 0; i < numResponse; i++) { + responses.add(createRandomIndexResponse()); + } + return new FieldCapabilitiesResponse(responses); + } + } + + + private FieldCapabilitiesIndexResponse createRandomIndexResponse() { + Map responses = new HashMap<>(); String[] fields = generateRandomStringArray(5, 10, false, true); assertNotNull(fields); for (String field : fields) { - Map typesToCapabilities = new HashMap<>(); - String[] types = generateRandomStringArray(5, 10, false, false); - assertNotNull(types); - - for (String type : types) { - typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field)); - } - responses.put(field, typesToCapabilities); + responses.put(field, FieldCapabilitiesTests.randomFieldCaps(field)); } - return new FieldCapabilitiesResponse(responses); + return new FieldCapabilitiesIndexResponse(randomAsciiLettersOfLength(10), responses); } @Override @@ -138,6 +166,11 @@ public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTe "}").replaceAll("\\s+", ""), generatedResponse); } + public void testEmptyResponse() throws IOException { + FieldCapabilitiesResponse testInstance = new FieldCapabilitiesResponse(); + assertSerialization(testInstance); + } + private static FieldCapabilitiesResponse createSimpleResponse() { Map titleCapabilities = new HashMap<>(); titleCapabilities.put("text", new FieldCapabilities("title", "text", true, false)); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java new file mode 100644 index 00000000000..341022030b3 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
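The tests that follow exercise the Diffable contract; the invariant they rely on, stated in one sketch (mapA/mapB are illustrative names):

    // applying diff(before -> after) onto before must reproduce after
    DiffableStringMap before = new DiffableStringMap(mapA);
    DiffableStringMap after = new DiffableStringMap(mapB);
    assertEquals(after, after.diff(before).apply(before));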
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class DiffableStringMapTests extends ESTestCase { + + public void testDiffableStringMapDiff() { + Map m = new HashMap<>(); + m.put("foo", "bar"); + m.put("baz", "eggplant"); + m.put("potato", "canon"); + DiffableStringMap dsm = new DiffableStringMap(m); + + Map m2 = new HashMap<>(); + m2.put("foo", "not-bar"); + m2.put("newkey", "yay"); + m2.put("baz", "eggplant"); + DiffableStringMap dsm2 = new DiffableStringMap(m2); + + Diff diff = dsm2.diff(dsm); + assertThat(diff, instanceOf(DiffableStringMap.DiffableStringMapDiff.class)); + DiffableStringMap.DiffableStringMapDiff dsmd = (DiffableStringMap.DiffableStringMapDiff) diff; + + assertThat(dsmd.getDeletes(), containsInAnyOrder("potato")); + assertThat(dsmd.getDiffs().size(), equalTo(0)); + Map upserts = new HashMap<>(); + upserts.put("foo", "not-bar"); + upserts.put("newkey", "yay"); + assertThat(dsmd.getUpserts(), equalTo(upserts)); + + DiffableStringMap dsm3 = diff.apply(dsm); + assertThat(dsm3.get("foo"), equalTo("not-bar")); + assertThat(dsm3.get("newkey"), equalTo("yay")); + assertThat(dsm3.get("baz"), equalTo("eggplant")); + assertThat(dsm3.get("potato"), equalTo(null)); + } + + public void testRandomDiffing() { + Map m = new HashMap<>(); + m.put("1", "1"); + m.put("2", "2"); + m.put("3", "3"); + DiffableStringMap dsm = new DiffableStringMap(m); + DiffableStringMap expected = new DiffableStringMap(m); + + for (int i = 0; i < randomIntBetween(5, 50); i++) { + if (randomBoolean() && expected.size() > 1) { + expected.remove(randomFrom(expected.keySet())); + } else if (randomBoolean()) { + expected.put(randomFrom(expected.keySet()), randomAlphaOfLength(4)); + } else { + expected.put(randomAlphaOfLength(2), randomAlphaOfLength(4)); + } + dsm = expected.diff(dsm).apply(dsm); + } + assertThat(expected, equalTo(dsm)); + } + + public void testSerialization() throws IOException { + Map m = new HashMap<>(); + // Occasionally have an empty map + if (frequently()) { + m.put("foo", "bar"); + m.put("baz", "eggplant"); + m.put("potato", "canon"); + } + DiffableStringMap dsm = new DiffableStringMap(m); + + BytesStreamOutput bso = new BytesStreamOutput(); + dsm.writeTo(bso); + DiffableStringMap deserialized = new DiffableStringMap(bso.bytes().streamInput()); + assertThat(deserialized, equalTo(dsm)); + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 744a29e843c..1aaec080307 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -56,11 +56,11 @@ import org.hamcrest.Matchers; import org.mockito.ArgumentCaptor; import java.io.IOException; -import java.util.Map; -import java.util.HashSet; -import java.util.Set; -import java.util.Collections; import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; import java.util.function.Supplier; import static 
org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; @@ -71,13 +71,13 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; import static org.mockito.Matchers.anyObject; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.anyMap; -import static org.mockito.Mockito.times; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class IndexCreationTaskTests extends ESTestCase { @@ -127,14 +127,12 @@ public class IndexCreationTaskTests extends ESTestCase { addMatchingTemplate(builder -> builder .putAlias(AliasMetaData.builder("alias1")) .putMapping("mapping1", createMapping()) - .putCustom("custom1", createCustom()) .settings(Settings.builder().put("key1", "value1")) ); final ClusterState result = executeTask(); assertThat(result.metaData().index("test").getAliases(), hasKey("alias1")); - assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1")); assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1")); } @@ -142,41 +140,31 @@ public class IndexCreationTaskTests extends ESTestCase { public void testApplyDataFromRequest() throws Exception { setupRequestAlias(new Alias("alias1")); setupRequestMapping("mapping1", createMapping()); - setupRequestCustom("custom1", createCustom()); reqSettings.put("key1", "value1"); final ClusterState result = executeTask(); assertThat(result.metaData().index("test").getAliases(), hasKey("alias1")); - assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1")); assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1")); } public void testRequestDataHavePriorityOverTemplateData() throws Exception { - final IndexMetaData.Custom tplCustom = createCustom(); - final IndexMetaData.Custom reqCustom = createCustom(); - final IndexMetaData.Custom mergedCustom = createCustom(); - when(reqCustom.mergeWith(tplCustom)).thenReturn(mergedCustom); - final CompressedXContent tplMapping = createMapping("text"); final CompressedXContent reqMapping = createMapping("keyword"); addMatchingTemplate(builder -> builder .putAlias(AliasMetaData.builder("alias1").searchRouting("fromTpl").build()) .putMapping("mapping1", tplMapping) - .putCustom("custom1", tplCustom) .settings(Settings.builder().put("key1", "tplValue")) ); setupRequestAlias(new Alias("alias1").searchRouting("fromReq")); setupRequestMapping("mapping1", reqMapping); - setupRequestCustom("custom1", reqCustom); reqSettings.put("key1", "reqValue"); final ClusterState result = executeTask(); - assertThat(result.metaData().index("test").getCustoms().get("custom1"), equalTo(mergedCustom)); assertThat(result.metaData().index("test").getAliases().get("alias1").getSearchRouting(), equalTo("fromReq")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("reqValue")); assertThat(getMappingsFromResponse().get("mapping1").toString(), equalTo("{type={properties={field={type=keyword}}}}")); @@ -272,14 +260,13 @@ public class 
IndexCreationTaskTests extends ESTestCase { addMatchingTemplate(builder -> builder .putAlias(AliasMetaData.builder("alias1").searchRouting("fromTpl").build()) .putMapping("mapping1", createMapping()) - .putCustom("custom1", createCustom()) .settings(Settings.builder().put("key1", "tplValue")) ); final ClusterState result = executeTask(); assertThat(result.metaData().index("test").getAliases(), not(hasKey("alias1"))); - assertThat(result.metaData().index("test").getCustoms(), not(hasKey("custom1"))); + assertThat(result.metaData().index("test").getCustomData(), not(hasKey("custom1"))); assertThat(result.metaData().index("test").getSettings().keySet(), not(Matchers.contains("key1"))); assertThat(getMappingsFromResponse(), not(Matchers.hasKey("mapping1"))); } @@ -296,7 +283,6 @@ public class IndexCreationTaskTests extends ESTestCase { Boolean writeIndex = randomBoolean() ? null : randomBoolean(); setupRequestAlias(new Alias("alias1").writeIndex(writeIndex)); setupRequestMapping("mapping1", createMapping()); - setupRequestCustom("custom1", createCustom()); reqSettings.put("key1", "value1"); final ClusterState result = executeTask(); @@ -310,7 +296,6 @@ public class IndexCreationTaskTests extends ESTestCase { .numberOfShards(1).numberOfReplicas(0).build(); idxBuilder.put("test2", existingWriteIndex); setupRequestMapping("mapping1", createMapping()); - setupRequestCustom("custom1", createCustom()); reqSettings.put("key1", "value1"); setupRequestAlias(new Alias("alias1").writeIndex(true)); @@ -342,8 +327,8 @@ public class IndexCreationTaskTests extends ESTestCase { .numberOfReplicas(numReplicas); } - private IndexMetaData.Custom createCustom() { - return mock(IndexMetaData.Custom.class); + private Map createCustom() { + return Collections.singletonMap("a", "b"); } private interface MetaDataBuilderConfigurator { @@ -372,10 +357,6 @@ public class IndexCreationTaskTests extends ESTestCase { when(request.mappings()).thenReturn(Collections.singletonMap(mappingKey, mapping.string())); } - private void setupRequestCustom(String customKey, IndexMetaData.Custom custom) throws IOException { - when(request.customs()).thenReturn(Collections.singletonMap(customKey, custom)); - } - private CompressedXContent createMapping() throws IOException { return createMapping("text"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java index 9e8a5e04f43..393f7f6b1d4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java @@ -23,7 +23,9 @@ import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -45,6 +47,8 @@ import org.junit.Before; import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.Set; import static 
org.hamcrest.Matchers.is; @@ -71,6 +75,9 @@ public class IndexMetaDataTests extends ESTestCase { public void testIndexMetaDataSerialization() throws IOException { Integer numShard = randomFrom(1, 2, 4, 8, 16); int numberOfReplicas = randomIntBetween(0, 10); + Map customMap = new HashMap<>(); + customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); + customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); IndexMetaData metaData = IndexMetaData.builder("foo") .settings(Settings.builder() .put("index.version.created", 1) @@ -80,6 +87,7 @@ public class IndexMetaDataTests extends ESTestCase { .creationDate(randomLong()) .primaryTerm(0, 2) .setRoutingNumShards(32) + .putCustom("my_custom", customMap) .putRolloverInfo( new RolloverInfo(randomAlphaOfLength(5), Arrays.asList(new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), @@ -93,7 +101,8 @@ public class IndexMetaDataTests extends ESTestCase { builder.endObject(); XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); final IndexMetaData fromXContentMeta = IndexMetaData.fromXContent(parser); - assertEquals(metaData, fromXContentMeta); + assertEquals("expected: " + Strings.toString(metaData) + "\nactual : " + Strings.toString(fromXContentMeta), + metaData, fromXContentMeta); assertEquals(metaData.hashCode(), fromXContentMeta.hashCode()); assertEquals(metaData.getNumberOfReplicas(), fromXContentMeta.getNumberOfReplicas()); @@ -103,6 +112,11 @@ public class IndexMetaDataTests extends ESTestCase { assertEquals(metaData.getCreationDate(), fromXContentMeta.getCreationDate()); assertEquals(metaData.getRoutingFactor(), fromXContentMeta.getRoutingFactor()); assertEquals(metaData.primaryTerm(0), fromXContentMeta.primaryTerm(0)); + ImmutableOpenMap.Builder expectedCustomBuilder = ImmutableOpenMap.builder(); + expectedCustomBuilder.put("my_custom", new DiffableStringMap(customMap)); + ImmutableOpenMap expectedCustom = expectedCustomBuilder.build(); + assertEquals(metaData.getCustomData(), expectedCustom); + assertEquals(metaData.getCustomData(), fromXContentMeta.getCustomData()); final BytesStreamOutput out = new BytesStreamOutput(); metaData.writeTo(out); @@ -119,6 +133,8 @@ public class IndexMetaDataTests extends ESTestCase { assertEquals(metaData.getRoutingFactor(), deserialized.getRoutingFactor()); assertEquals(metaData.primaryTerm(0), deserialized.primaryTerm(0)); assertEquals(metaData.getRolloverInfos(), deserialized.getRolloverInfos()); + assertEquals(deserialized.getCustomData(), expectedCustom); + assertEquals(metaData.getCustomData(), deserialized.getCustomData()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java index c98587c4cc6..5fc07642354 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaDataTests.java @@ -78,13 +78,13 @@ public class IndexTemplateMetaDataTests extends ESTestCase { public void testValidateInvalidIndexPatterns() throws Exception { final IllegalArgumentException emptyPatternError = expectThrows(IllegalArgumentException.class, () -> { new IndexTemplateMetaData(randomRealisticUnicodeOfLengthBetween(5, 10), randomInt(), randomInt(), - Collections.emptyList(), Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of()); + Collections.emptyList(), 
Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of()); }); assertThat(emptyPatternError.getMessage(), equalTo("Index patterns must not be null or empty; got []")); final IllegalArgumentException nullPatternError = expectThrows(IllegalArgumentException.class, () -> { new IndexTemplateMetaData(randomRealisticUnicodeOfLengthBetween(5, 10), randomInt(), randomInt(), - null, Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of()); + null, Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of()); }); assertThat(nullPatternError.getMessage(), equalTo("Index patterns must not be null or empty; got null")); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 10fc358e4d4..da0e0a9b0bc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -286,16 +286,19 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { metaBuilder.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put("index.uuid", "1234")) .numberOfShards(4).numberOfReplicas(0)); metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT).put("index.uuid", "5678") - .put("index.shrink.source.name", "test").put("index.shrink.source.uuid", "1234")).numberOfShards(1).numberOfReplicas(0)); + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, "test").put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, "1234")) + .numberOfShards(1) + .numberOfReplicas(0)); metaBuilder.put(IndexMetaData.builder("target2").settings(settings(Version.CURRENT).put("index.uuid", "9101112") - .put("index.shrink.source.name", "test").put("index.shrink.source.uuid", "1234")).numberOfShards(2).numberOfReplicas(0)); + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, "test").put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, "1234")) + .numberOfShards(2).numberOfReplicas(0)); MetaData metaData = metaBuilder.build(); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); routingTableBuilder.addAsNew(metaData.index("test")); routingTableBuilder.addAsNew(metaData.index("target")); routingTableBuilder.addAsNew(metaData.index("target2")); - ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .metaData(metaData).routingTable(routingTableBuilder.build()).build(); + ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build(); AllocationService allocationService = createAllocationService(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))) @@ -330,7 +333,6 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { assertEquals(100L, DiskThresholdDecider.getExpectedShardSize(test_1, allocation, 0)); assertEquals(10L, DiskThresholdDecider.getExpectedShardSize(test_0, allocation, 0)); - ShardRouting target = ShardRouting.newUnassigned(new ShardId(new Index("target", "5678"), 0), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); assertEquals(1110L, 
DiskThresholdDecider.getExpectedShardSize(target, allocation, 0)); @@ -350,12 +352,9 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { .build(); allocationService.reroute(clusterState, "foo"); - RoutingAllocation allocationWithMissingSourceIndex = new RoutingAllocation(null, clusterStateWithMissingSourceIndex.getRoutingNodes(), clusterStateWithMissingSourceIndex, info, 0); - assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target, allocationWithMissingSourceIndex, 42L)); assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target2, allocationWithMissingSourceIndex, 42L)); } - } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index b1fa8346e2c..ba6fe5b9a5a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -42,8 +42,8 @@ import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; import java.util.Collections; -import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_SHRINK_SOURCE_NAME; -import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_SHRINK_SOURCE_UUID; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_RESIZE_SOURCE_NAME; +import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_RESIZE_SOURCE_UUID; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; @@ -151,8 +151,8 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase { .putInSyncAllocationIds(1, Collections.singleton("aid1")) .build(); metaData.put(sourceIndex, false); - indexSettings.put(INDEX_SHRINK_SOURCE_UUID.getKey(), sourceIndex.getIndexUUID()); - indexSettings.put(INDEX_SHRINK_SOURCE_NAME.getKey(), sourceIndex.getIndex().getName()); + indexSettings.put(INDEX_RESIZE_SOURCE_UUID.getKey(), sourceIndex.getIndexUUID()); + indexSettings.put(INDEX_RESIZE_SOURCE_NAME.getKey(), sourceIndex.getIndex().getName()); } else { sourceIndex = null; } diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index f2491b2db1f..82ec987420b 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.discovery; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -29,6 +28,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.plugins.DiscoveryPlugin; @@ -99,7 +99,7 @@ public class DiscoveryModuleTests 
extends ESTestCase { private DiscoveryModule newModule(Settings settings, List plugins) { return new DiscoveryModule(settings, threadPool, transportService, namedWriteableRegistry, null, masterService, - clusterApplier, clusterSettings, plugins, null); + clusterApplier, clusterSettings, plugins, null, createTempDir().toAbsolutePath()); } public void testDefaults() { diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java similarity index 63% rename from plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java rename to server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java index 5837d3bcdfe..8922a38ea1e 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/FileBasedUnicastHostsProviderTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.discovery.file; +package org.elasticsearch.discovery.zen; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -26,9 +26,7 @@ import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.env.Environment; -import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -50,16 +48,15 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import static org.elasticsearch.discovery.file.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE; +import static org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE; -/** - * Tests for {@link FileBasedUnicastHostsProvider}. 
- */ public class FileBasedUnicastHostsProviderTests extends ESTestCase { + private boolean legacyLocation; private ThreadPool threadPool; private ExecutorService executorService; private MockTransportService transportService; + private Path configPath; @Before public void setUp() throws Exception { @@ -83,23 +80,20 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { @Before public void createTransportSvc() { - MockTcpTransport transport = - new MockTcpTransport(Settings.EMPTY, - threadPool, - BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), - new NamedWriteableRegistry(Collections.emptyList()), - new NetworkService(Collections.emptyList())) { - @Override - public BoundTransportAddress boundAddress() { - return new BoundTransportAddress( - new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, - new TransportAddress(InetAddress.getLoopbackAddress(), 9300) - ); - } - }; + final MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, + new NoneCircuitBreakerService(), + new NamedWriteableRegistry(Collections.emptyList()), + new NetworkService(Collections.emptyList())) { + @Override + public BoundTransportAddress boundAddress() { + return new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)}, + new TransportAddress(InetAddress.getLoopbackAddress(), 9300) + ); + } + }; transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null); + null); } public void testBuildDynamicNodes() throws Exception { @@ -114,18 +108,27 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { assertEquals(9300, nodes.get(2).getPort()); } + public void testBuildDynamicNodesLegacyLocation() throws Exception { + legacyLocation = true; + testBuildDynamicNodes(); + assertDeprecatedLocationWarning(); + } + public void testEmptyUnicastHostsFile() throws Exception { final List hostEntries = Collections.emptyList(); final List addresses = setupAndRunHostProvider(hostEntries); assertEquals(0, addresses.size()); } - public void testUnicastHostsDoesNotExist() throws Exception { - final Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - final Environment environment = TestEnvironment.newEnvironment(settings); - final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment); + public void testEmptyUnicastHostsFileLegacyLocation() throws Exception { + legacyLocation = true; + testEmptyUnicastHostsFile(); + assertDeprecatedLocationWarning(); + } + + public void testUnicastHostsDoesNotExist() { + final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); + final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, createTempDir().toAbsolutePath()); final List addresses = provider.buildDynamicHosts((hosts, limitPortCounts) -> UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, TimeValue.timeValueSeconds(10))); @@ -133,42 +136,60 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase { } public void testInvalidHostEntries() throws Exception { - List hostEntries = Arrays.asList("192.168.0.1:9300:9300"); - List addresses = setupAndRunHostProvider(hostEntries); + final List hostEntries = 
Arrays.asList("192.168.0.1:9300:9300"); + final List addresses = setupAndRunHostProvider(hostEntries); assertEquals(0, addresses.size()); } + public void testInvalidHostEntriesLegacyLocation() throws Exception { + legacyLocation = true; + testInvalidHostEntries(); + assertDeprecatedLocationWarning(); + } + public void testSomeInvalidHostEntries() throws Exception { - List hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301"); - List addresses = setupAndRunHostProvider(hostEntries); + final List hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301"); + final List addresses = setupAndRunHostProvider(hostEntries); assertEquals(1, addresses.size()); // only one of the two is valid and will be used assertEquals("192.168.0.1", addresses.get(0).getAddress()); assertEquals(9301, addresses.get(0).getPort()); } + public void testSomeInvalidHostEntriesLegacyLocation() throws Exception { + legacyLocation = true; + testSomeInvalidHostEntries(); + assertDeprecatedLocationWarning(); + } + // sets up the config dir, writes to the unicast hosts file in the config dir, // and then runs the file-based unicast host provider to get the list of discovery nodes private List setupAndRunHostProvider(final List hostEntries) throws IOException { final Path homeDir = createTempDir(); final Settings settings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), homeDir) - .build(); - final Path configPath; + .put(Environment.PATH_HOME_SETTING.getKey(), homeDir) + .build(); if (randomBoolean()) { configPath = homeDir.resolve("config"); } else { configPath = createTempDir(); } - final Path discoveryFilePath = configPath.resolve("discovery-file"); + final Path discoveryFilePath = legacyLocation ? configPath.resolve("discovery-file") : configPath; Files.createDirectories(discoveryFilePath); final Path unicastHostsPath = discoveryFilePath.resolve(UNICAST_HOSTS_FILE); try (BufferedWriter writer = Files.newBufferedWriter(unicastHostsPath)) { writer.write(String.join("\n", hostEntries)); } - return new FileBasedUnicastHostsProvider( - new Environment(settings, configPath)).buildDynamicHosts((hosts, limitPortCounts) -> - UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, - TimeValue.timeValueSeconds(10))); + return new FileBasedUnicastHostsProvider(settings, configPath).buildDynamicHosts((hosts, limitPortCounts) -> + UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, + TimeValue.timeValueSeconds(10))); + } + + private void assertDeprecatedLocationWarning() { + assertWarnings("Found dynamic hosts list at [" + + configPath.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE) + + "] but this path is deprecated. This list should be at [" + + configPath.resolve(UNICAST_HOSTS_FILE) + + "] instead. Support for the deprecated path will be removed in future."); } } diff --git a/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java new file mode 100644 index 00000000000..2cb13af7a28 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.ingest; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.MockScriptEngine; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.core.Is.is; + +public class ConditionalProcessorTests extends ESTestCase { + + public void testChecksCondition() throws Exception { + String conditionalField = "field1"; + String scriptName = "conditionalScript"; + String trueValue = "truthy"; + ScriptService scriptService = new ScriptService(Settings.builder().build(), + Collections.singletonMap( + Script.DEFAULT_SCRIPT_LANG, + new MockScriptEngine( + Script.DEFAULT_SCRIPT_LANG, + Collections.singletonMap( + scriptName, ctx -> trueValue.equals(ctx.get(conditionalField)) + ) + ) + ), + new HashMap<>(ScriptModule.CORE_CONTEXTS) + ); + Map document = new HashMap<>(); + ConditionalProcessor processor = new ConditionalProcessor( + randomAlphaOfLength(10), + new Script( + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, + scriptName, Collections.emptyMap()), scriptService, + new Processor() { + @Override + public void execute(final IngestDocument ingestDocument) throws Exception { + ingestDocument.setFieldValue("foo", "bar"); + } + + @Override + public String getType() { + return null; + } + + @Override + public String getTag() { + return null; + } + }); + + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + ingestDocument.setFieldValue(conditionalField, trueValue); + processor.execute(ingestDocument); + assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(trueValue)); + assertThat(ingestDocument.getSourceAndMetadata().get("foo"), is("bar")); + + String falseValue = "falsy"; + ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + ingestDocument.setFieldValue(conditionalField, falseValue); + processor.execute(ingestDocument); + assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(falseValue)); + assertThat(ingestDocument.getSourceAndMetadata(), not(hasKey("foo"))); + } + + @SuppressWarnings("unchecked") + public void testActsOnImmutableData() throws Exception { + assertMutatingCtxThrows(ctx -> ctx.remove("foo")); + assertMutatingCtxThrows(ctx -> ctx.put("foo", "bar")); + assertMutatingCtxThrows(ctx -> ((List)ctx.get("listField")).add("bar")); + assertMutatingCtxThrows(ctx -> 
((List)ctx.get("listField")).remove("bar")); } + + private static void assertMutatingCtxThrows(Consumer<Map<String, Object>> mutation) throws Exception { + String scriptName = "conditionalScript"; + CompletableFuture<Exception> expectedException = new CompletableFuture<>(); + ScriptService scriptService = new ScriptService(Settings.builder().build(), + Collections.singletonMap( + Script.DEFAULT_SCRIPT_LANG, + new MockScriptEngine( + Script.DEFAULT_SCRIPT_LANG, + Collections.singletonMap( + scriptName, ctx -> { + try { + mutation.accept(ctx); + } catch (Exception e) { + expectedException.complete(e); + } + return false; + } + ) + ) + ), + new HashMap<>(ScriptModule.CORE_CONTEXTS) + ); + Map<String, Object> document = new HashMap<>(); + ConditionalProcessor processor = new ConditionalProcessor( + randomAlphaOfLength(10), + new Script( + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, + scriptName, Collections.emptyMap()), scriptService, null + ); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + ingestDocument.setFieldValue("listField", new ArrayList<>()); + processor.execute(ingestDocument); + Exception e = expectedException.get(); + assertThat(e, instanceOf(UnsupportedOperationException.class)); + assertEquals("Mutating ingest documents in conditionals is not supported", e.getMessage()); + } +} diff --git a/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java b/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java index 61afd9ce2a4..f3a11a86e54 100644 --- a/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/ConfigurationUtilsTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -38,6 +39,9 @@ import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; public class ConfigurationUtilsTests extends ESTestCase { + + private final ScriptService scriptService = mock(ScriptService.class); + private Map<String, Object> config; @Before @@ -120,7 +124,7 @@ public class ConfigurationUtilsTests extends ESTestCase { config.add(Collections.singletonMap("test_processor", emptyConfig)); config.add(Collections.singletonMap("test_processor", emptyConfig)); - List<Processor> result = ConfigurationUtils.readProcessorConfigs(config, registry); + List<Processor> result = ConfigurationUtils.readProcessorConfigs(config, scriptService, registry); assertThat(result.size(), equalTo(2)); assertThat(result.get(0), sameInstance(processor)); assertThat(result.get(1), sameInstance(processor)); @@ -129,7 +133,7 @@ public class ConfigurationUtilsTests extends ESTestCase { unknownTaggedConfig.put("tag", "my_unknown"); config.add(Collections.singletonMap("unknown_processor", unknownTaggedConfig)); ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, - () -> ConfigurationUtils.readProcessorConfigs(config, registry)); + () -> ConfigurationUtils.readProcessorConfigs(config, scriptService, registry)); assertThat(e.getMessage(), equalTo("No processor type exists with name [unknown_processor]")); assertThat(e.getMetadata("es.processor_tag"), equalTo(Collections.singletonList("my_unknown"))); assertThat(e.getMetadata("es.processor_type"), equalTo(Collections.singletonList("unknown_processor"))); @@ -142,7 +146,10 @@ public class ConfigurationUtilsTests extends ESTestCase { Map<String, Object> secondUnknonwTaggedConfig = new 
HashMap<>(); secondUnknonwTaggedConfig.put("tag", "my_second_unknown"); config2.add(Collections.singletonMap("second_unknown_processor", secondUnknonwTaggedConfig)); - e = expectThrows(ElasticsearchParseException.class, () -> ConfigurationUtils.readProcessorConfigs(config2, registry)); + e = expectThrows( + ElasticsearchParseException.class, + () -> ConfigurationUtils.readProcessorConfigs(config2, scriptService, registry) + ); assertThat(e.getMessage(), equalTo("No processor type exists with name [unknown_processor]")); assertThat(e.getMetadata("es.processor_tag"), equalTo(Collections.singletonList("my_unknown"))); assertThat(e.getMetadata("es.processor_type"), equalTo(Collections.singletonList("unknown_processor"))); @@ -166,17 +173,17 @@ public class ConfigurationUtilsTests extends ESTestCase { }); Object emptyConfig = Collections.emptyMap(); - Processor processor1 = ConfigurationUtils.readProcessor(registry, "script", emptyConfig); + Processor processor1 = ConfigurationUtils.readProcessor(registry, scriptService, "script", emptyConfig); assertThat(processor1, sameInstance(processor)); Object inlineScript = "test_script"; - Processor processor2 = ConfigurationUtils.readProcessor(registry, "script", inlineScript); + Processor processor2 = ConfigurationUtils.readProcessor(registry, scriptService, "script", inlineScript); assertThat(processor2, sameInstance(processor)); Object invalidConfig = 12L; ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, - () -> ConfigurationUtils.readProcessor(registry, "unknown_processor", invalidConfig)); + () -> ConfigurationUtils.readProcessor(registry, scriptService, "unknown_processor", invalidConfig)); assertThat(ex.getMessage(), equalTo("property isn't a map, but of type [" + invalidConfig.getClass().getName() + "]")); } diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java index cafdbcfb446..d6d7b4ffa81 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineFactoryTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -32,11 +33,13 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; public class PipelineFactoryTests extends ESTestCase { private final Integer version = randomBoolean() ? randomInt() : null; private final String versionString = version != null ? 
Integer.toString(version) : null; + private final ScriptService scriptService = mock(ScriptService.class); public void testCreate() throws Exception { Map processorConfig0 = new HashMap<>(); @@ -48,7 +51,7 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.PROCESSORS_KEY, Arrays.asList(Collections.singletonMap("test", processorConfig0), Collections.singletonMap("test", processorConfig1))); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -64,7 +67,7 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); try { - Pipeline.create("_id", pipelineConfig, Collections.emptyMap()); + Pipeline.create("_id", pipelineConfig, Collections.emptyMap(), scriptService); fail("should fail, missing required [processors] field"); } catch (ElasticsearchParseException e) { assertThat(e.getMessage(), equalTo("[processors] required property is missing")); @@ -76,7 +79,7 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description"); pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.emptyList()); - Pipeline pipeline = Pipeline.create("_id", pipelineConfig, null); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, null, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -91,7 +94,7 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -109,7 +112,10 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.emptyList()); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> Pipeline.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows( + ElasticsearchParseException.class, + () -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService) + ); assertThat(e.getMessage(), equalTo("pipeline [_id] cannot have an empty on_failure option defined")); } @@ -121,7 +127,10 @@ public class PipelineFactoryTests extends ESTestCase { 
pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> Pipeline.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows( + ElasticsearchParseException.class, + () -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService) + ); assertThat(e.getMessage(), equalTo("[on_failure] processors list cannot be empty")); } @@ -136,7 +145,7 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); - Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); @@ -156,7 +165,10 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Exception e = expectThrows(ElasticsearchParseException.class, () -> Pipeline.create("_id", pipelineConfig, processorRegistry)); + Exception e = expectThrows( + ElasticsearchParseException.class, + () -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService) + ); assertThat(e.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]")); } @@ -169,7 +181,7 @@ public class PipelineFactoryTests extends ESTestCase { pipelineConfig.put(Pipeline.VERSION_KEY, versionString); pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig))); Map processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory()); - Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry); + Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService); assertThat(pipeline.getId(), equalTo("_id")); assertThat(pipeline.getDescription(), equalTo("_description")); assertThat(pipeline.getVersion(), equalTo(version)); diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java new file mode 100644 index 00000000000..29b19739e75 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsActionTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.Set; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + + +public class RestClusterGetSettingsActionTests extends ESTestCase { + + public void testFilterPersistentSettings() { + runTestFilterSettingsTest(MetaData.Builder::persistentSettings, ClusterGetSettingsResponse::getPersistentSettings); + } + + public void testFilterTransientSettings() { + runTestFilterSettingsTest(MetaData.Builder::transientSettings, ClusterGetSettingsResponse::getTransientSettings); + } + + private void runTestFilterSettingsTest( + final BiConsumer<MetaData.Builder, Settings> md, final Function<ClusterGetSettingsResponse, Settings> s) { + final MetaData.Builder mdBuilder = new MetaData.Builder(); + final Settings settings = Settings.builder().put("foo.filtered", "bar").put("foo.non_filtered", "baz").build(); + md.accept(mdBuilder, settings); + final ClusterState.Builder builder = new ClusterState.Builder(ClusterState.EMPTY_STATE).metaData(mdBuilder); + final SettingsFilter filter = new SettingsFilter(Settings.EMPTY, Collections.singleton("foo.filtered")); + final Setting.Property[] properties = {Setting.Property.Dynamic, Setting.Property.Filtered, Setting.Property.NodeScope}; + final Set<Setting<?>> settingsSet = Stream.concat( + ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), + Stream.concat( + Stream.of(Setting.simpleString("foo.filtered", properties)), + Stream.of(Setting.simpleString("foo.non_filtered", properties)))) + .collect(Collectors.toSet()); + final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, settingsSet); + final ClusterGetSettingsResponse response = + RestClusterGetSettingsAction.response(builder.build(), randomBoolean(), filter, clusterSettings, Settings.EMPTY); + assertFalse(s.apply(response).hasValue("foo.filtered")); + assertTrue(s.apply(response).hasValue("foo.non_filtered")); + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java index 5cc4e2ddc68..7790e8d6576 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourceSubPhaseTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.test.TestSearchContext; import java.io.IOException; import java.util.Collections; +import java.util.Map; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -78,6 +79,29 @@ public class 
FetchSourceSubPhaseTests extends ESTestCase { assertEquals(Collections.singletonMap("field","value"), hitContext.hit().getSourceAsMap()); } + public void testNestedSource() throws IOException { + Map expectedNested = Collections.singletonMap("nested2", Collections.singletonMap("field", "value0")); + XContentBuilder source = XContentFactory.jsonBuilder().startObject() + .field("field", "value") + .field("field2", "value2") + .field("nested1", expectedNested) + .endObject(); + FetchSubPhase.HitContext hitContext = hitExecuteMultiple(source, true, null, null, + new SearchHit.NestedIdentity("nested1", 0,null)); + assertEquals(expectedNested, hitContext.hit().getSourceAsMap()); + hitContext = hitExecuteMultiple(source, true, new String[]{"invalid"}, null, + new SearchHit.NestedIdentity("nested1", 0,null)); + assertEquals(Collections.emptyMap(), hitContext.hit().getSourceAsMap()); + + hitContext = hitExecuteMultiple(source, true, null, null, + new SearchHit.NestedIdentity("nested1", 0, new SearchHit.NestedIdentity("nested2", 0, null))); + assertEquals(Collections.singletonMap("field", "value0"), hitContext.hit().getSourceAsMap()); + + hitContext = hitExecuteMultiple(source, true, new String[]{"invalid"}, null, + new SearchHit.NestedIdentity("nested1", 0, new SearchHit.NestedIdentity("nested2", 0, null))); + assertEquals(Collections.emptyMap(), hitContext.hit().getSourceAsMap()); + } + public void testSourceDisabled() throws IOException { FetchSubPhase.HitContext hitContext = hitExecute(null, true, null, null); assertNull(hitContext.hit().getSourceAsMap()); @@ -96,17 +120,29 @@ public class FetchSourceSubPhaseTests extends ESTestCase { } private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) { + return hitExecute(source, fetchSource, include, exclude, null); + } + + + private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude, + SearchHit.NestedIdentity nestedIdentity) { return hitExecuteMultiple(source, fetchSource, include == null ? Strings.EMPTY_ARRAY : new String[]{include}, - exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}); + exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}, nestedIdentity); } private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes) { + return hitExecuteMultiple(source, fetchSource, includes, excludes, null); + } + + private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes, + SearchHit.NestedIdentity nestedIdentity) { FetchSourceContext fetchSourceContext = new FetchSourceContext(fetchSource, includes, excludes); SearchContext searchContext = new FetchSourceSubPhaseTestSearchContext(fetchSourceContext, source == null ? 
null : BytesReference.bytes(source)); FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); - hitContext.reset(new SearchHit(1, null, null, null), null, 1, null); + final SearchHit searchHit = new SearchHit(1, null, null, nestedIdentity, null); + hitContext.reset(searchHit, null, 1, null); FetchSourceSubPhase phase = new FetchSourceSubPhase(); phase.hitExecute(searchContext, hitContext); return hitContext; diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index 35dac2e99e0..c50e7cf066b 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -177,8 +177,11 @@ public class BootstrapForTesting { private static void addClassCodebase(Map codebases, String name, String classname) { try { Class clazz = BootstrapForTesting.class.getClassLoader().loadClass(classname); - if (codebases.put(name, clazz.getProtectionDomain().getCodeSource().getLocation()) != null) { - throw new IllegalStateException("Already added " + name + " codebase for testing"); + URL location = clazz.getProtectionDomain().getCodeSource().getLocation(); + if (location.toString().endsWith(".jar") == false) { + if (codebases.put(name, location) != null) { + throw new IllegalStateException("Already added " + name + " codebase for testing"); + } } } catch (ClassNotFoundException e) { // no class, fall through to not add. this can happen for any tests that do not include diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index 4e2b8259e6f..0ee5798efb3 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -96,6 +96,14 @@ public class MockScriptEngine implements ScriptEngine { } }; return context.factoryClazz.cast(factory); + } else if (context.instanceClazz.equals(IngestConditionalScript.class)) { + IngestConditionalScript.Factory factory = parameters -> new IngestConditionalScript(parameters) { + @Override + public boolean execute(Map ctx) { + return (boolean) script.apply(ctx); + } + }; + return context.factoryClazz.cast(factory); } else if (context.instanceClazz.equals(UpdateScript.class)) { UpdateScript.Factory factory = parameters -> new UpdateScript(parameters) { @Override diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 6cca05c4a0e..99e62532e2d 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -685,9 +685,8 @@ setups['sensor_prefab_data'] = ''' page_size: 1000 groups: date_histogram: - delay: "7d" field: "timestamp" - interval: "1h" + interval: "7d" time_zone: "UTC" terms: fields: diff --git a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc b/x-pack/docs/en/rest-api/rollup/put-job.asciidoc index 1449acadc63..27889d985b8 100644 --- a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc +++ b/x-pack/docs/en/rest-api/rollup/put-job.asciidoc @@ -43,6 +43,8 @@ started with the <>. `metrics`:: (object) Defines the metrics that should be collected for each grouping tuple. See <>. +For more details about the job configuration, see <>. + ==== Authorization You must have `manage` or `manage_rollup` cluster privileges to use this API. 
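As an aside on the calendar-vs-fixed distinction discussed in the rollup-job-config changes below: a minimal Java sketch, using only the JDK's java.time (the class and variable names here are invented for illustration and are not part of this change set), of why a multiple of a unit is always "fixed" while a single calendar unit is not.

[source,java]
----
import java.time.Duration;
import java.time.LocalDate;
import java.time.temporal.ChronoUnit;

public class FixedVsCalendarSketch {
    public static void main(String[] args) {
        // Fixed intervals have a constant length: "2h" is always 7200 seconds.
        Duration twoHours = Duration.ofHours(2);
        System.out.println("2h = " + twoHours.getSeconds() + "s"); // 7200

        // Calendar units vary: the length of "1M" depends on which month it is.
        LocalDate jan2018 = LocalDate.of(2018, 1, 1);
        LocalDate feb2018 = LocalDate.of(2018, 2, 1);
        System.out.println(ChronoUnit.DAYS.between(jan2018, jan2018.plusMonths(1))); // 31
        System.out.println(ChronoUnit.DAYS.between(feb2018, feb2018.plusMonths(1))); // 28

        // Hence the advice in the note below: request a fixed day as "24h"
        // rather than the calendar day "1d", and fixed hours as "60m" rather than "1h".
    }
}
----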
diff --git a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc b/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc index 2ba92b6b59e..f937f28601a 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc +++ b/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc @@ -23,7 +23,7 @@ PUT _xpack/rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "interval": "60m", "delay": "7d" }, "terms": { @@ -99,7 +99,7 @@ fields will then be available later for aggregating into buckets. For example, "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "interval": "60m", "delay": "7d" }, "terms": { @@ -133,9 +133,9 @@ The `date_histogram` group has several parameters: The date field that is to be rolled up. `interval` (required):: - The interval of time buckets to be generated when rolling up. E.g. `"1h"` will produce hourly rollups. This follows standard time formatting - syntax as used elsewhere in Elasticsearch. The `interval` defines the _minimum_ interval that can be aggregated only. If hourly (`"1h"`) - intervals are configured, <> can execute aggregations with 1hr or greater (weekly, monthly, etc) intervals. + The interval of time buckets to be generated when rolling up. E.g. `"60m"` will produce 60-minute (hourly) rollups. This follows standard time formatting + syntax as used elsewhere in Elasticsearch. The `interval` defines only the _minimum_ interval that can be aggregated. If hourly (`"60m"`) + intervals are configured, <> can execute aggregations with 60m or greater (weekly, monthly, etc) intervals. So define the interval as the smallest unit that you wish to later query. Note: smaller, more granular intervals take up proportionally more space. @@ -154,6 +154,46 @@ The `date_histogram` group has several parameters: to be stored with a specific timezone. By default, rollup documents are stored in `UTC`, but this can be changed with the `time_zone` parameter. +.Calendar vs Fixed time intervals +********************************** +Elasticsearch understands both "calendar" and "fixed" time intervals. Fixed time intervals are fairly easy to understand; +`"60s"` means sixty seconds. But what does `"1M"` mean? One month of time depends on which month we are talking about; +some months are longer or shorter than others. This is an example of "calendar" time, and the duration of that unit +depends on context. Calendar units are also affected by leap seconds, leap years, etc. + +This is important because the buckets generated by Rollup will be in either calendar or fixed intervals, and will limit +how you can query them later (see <>). + +We recommend sticking with "fixed" time intervals, since they are easier to understand and are more flexible at query +time. Fixed intervals will introduce some drift in your data during leap events, and you will have to think about months in a fixed +quantity (30 days) instead of the actual calendar length... but it is often easier than dealing with calendar units +at query time. + +Multiples of units are always "fixed" (e.g. `"2h"` is always the fixed quantity `7200` seconds). 
Single units can be +fixed or calendar depending on the unit: + +[options="header"] +|======= +|Unit |Calendar |Fixed +|millisecond |NA |`1ms`, `10ms`, etc +|second |NA |`1s`, `10s`, etc +|minute |`1m` |`2m`, `10m`, etc +|hour |`1h` |`2h`, `10h`, etc +|day |`1d` |`2d`, `10d`, etc +|week |`1w` |NA +|month |`1M` |NA +|quarter |`1q` |NA +|year |`1y` |NA +|======= + +For units that have both fixed and calendar forms, you may need to express the quantity in terms of the next +smaller unit. For example, if you want a fixed day (not a calendar day), you should specify `24h` instead of `1d`. +Similarly, if you want fixed hours, specify `60m` instead of `1h`. This is because the single quantity denotes +calendar time, which limits you to querying by calendar time later on. + + +********************************** + ===== Terms The `terms` group can be used on `keyword` or numeric fields, to allow bucketing via the `terms` aggregation at a later point. The `terms` diff --git a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc b/x-pack/docs/en/rollup/rollup-getting-started.asciidoc index 24f68dddd81..b6c913d7d34 100644 --- a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc +++ b/x-pack/docs/en/rollup/rollup-getting-started.asciidoc @@ -37,8 +37,7 @@ PUT _xpack/rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", - "delay": "7d" + "interval": "60m" }, "terms": { "fields": ["node"] @@ -66,7 +65,7 @@ The `cron` parameter controls when and how often the job activates. When a roll from where it left off after the last activation. So if you configure the cron to run every 30 seconds, the job will process the last 30 seconds worth of data that was indexed into the `sensor-*` indices. -If instead the cron was configured to run once a day at midnight, the job would process the last 24hours worth of data. The choice is largely +If instead the cron was configured to run once a day at midnight, the job would process the last 24 hours worth of data. The choice is largely preference, based on how "realtime" you want the rollups, and if you wish to process continuously or move it to off-peak hours. Next, we define a set of `groups` and `metrics`. The metrics are fairly straightforward: we want to save the min/max/sum of the `temperature` @@ -79,7 +78,7 @@ It also allows us to run terms aggregations on the `node` field. .Date histogram interval vs cron schedule ********************************** You'll note that the job's cron is configured to run every 30 seconds, but the date_histogram is configured to -rollup at hourly intervals. How do these relate? +roll up at 60-minute intervals. How do these relate? The date_histogram controls the granularity of the saved data. Data will be rolled up into hourly intervals, and you will be unable to query with finer granularity. The cron simply controls when the process looks for new data to rollup. Every 30 seconds it will see @@ -223,70 +222,71 @@ Which returns a corresponding response: [source,js] ---- { - "took" : 93, - "timed_out" : false, - "terminated_early" : false, - "_shards" : ... 
, - "hits" : { - "total" : 0, - "max_score" : 0.0, - "hits" : [ ] - }, - "aggregations" : { - "timeline" : { - "meta" : { }, - "buckets" : [ - { - "key_as_string" : "2018-01-18T00:00:00.000Z", - "key" : 1516233600000, - "doc_count" : 6, - "nodes" : { - "doc_count_error_upper_bound" : 0, - "sum_other_doc_count" : 0, - "buckets" : [ - { - "key" : "a", - "doc_count" : 2, - "max_temperature" : { - "value" : 202.0 - }, - "avg_voltage" : { - "value" : 5.1499998569488525 - } - }, - { - "key" : "b", - "doc_count" : 2, - "max_temperature" : { - "value" : 201.0 - }, - "avg_voltage" : { - "value" : 5.700000047683716 - } - }, - { - "key" : "c", - "doc_count" : 2, - "max_temperature" : { - "value" : 202.0 - }, - "avg_voltage" : { - "value" : 4.099999904632568 - } - } - ] - } - } - ] - } - } + "took" : 93, + "timed_out" : false, + "terminated_early" : false, + "_shards" : ... , + "hits" : { + "total" : 0, + "max_score" : 0.0, + "hits" : [ ] + }, + "aggregations" : { + "timeline" : { + "meta" : { }, + "buckets" : [ + { + "key_as_string" : "2018-01-18T00:00:00.000Z", + "key" : 1516233600000, + "doc_count" : 6, + "nodes" : { + "doc_count_error_upper_bound" : 0, + "sum_other_doc_count" : 0, + "buckets" : [ + { + "key" : "a", + "doc_count" : 2, + "max_temperature" : { + "value" : 202.0 + }, + "avg_voltage" : { + "value" : 5.1499998569488525 + } + }, + { + "key" : "b", + "doc_count" : 2, + "max_temperature" : { + "value" : 201.0 + }, + "avg_voltage" : { + "value" : 5.700000047683716 + } + }, + { + "key" : "c", + "doc_count" : 2, + "max_temperature" : { + "value" : 202.0 + }, + "avg_voltage" : { + "value" : 4.099999904632568 + } + } + ] + } + } + ] + } + } } + ---- // TESTRESPONSE[s/"took" : 93/"took" : $body.$_path/] // TESTRESPONSE[s/"_shards" : \.\.\. /"_shards" : $body.$_path/] In addition to being more complicated (date histogram and a terms aggregation, plus an additional average metric), you'll notice -the date_histogram uses a `7d` interval instead of `1h`. +the date_histogram uses a `7d` interval instead of `60m`. [float] === Conclusion diff --git a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc b/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc index 57ba23eebcc..99f19a179ed 100644 --- a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc +++ b/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc @@ -80,9 +80,25 @@ The response will tell you that the field and aggregation were not possible, bec [float] === Interval Granularity -Rollups are stored at a certain granularity, as defined by the `date_histogram` group in the configuration. If data is rolled up at hourly -intervals, the <> API can aggregate on any time interval hourly or greater. Intervals that are less than an hour will throw -an exception, since the data simply doesn't exist for finer granularities. +Rollups are stored at a certain granularity, as defined by the `date_histogram` group in the configuration. This means you +can only search/aggregate the rollup data with an interval that is greater-than or equal to the configured rollup interval. + +For example, if data is rolled up at hourly intervals, the <> API can aggregate on any time interval +hourly or greater. Intervals that are less than an hour will throw an exception, since the data simply doesn't +exist for finer granularities. 
+ +[[rollup-search-limitations-intervals]] +.Requests must be multiples of the config +********************************** +Though perhaps not immediately apparent, the interval specified in an aggregation request must be a whole +multiple of the configured interval. If the job was configured to roll up on `3d` intervals, you can only +query and aggregate on multiples of three (`3d`, `6d`, `9d`, etc). + +A non-multiple wouldn't work, since the rolled-up data wouldn't cleanly "overlap" with the buckets generated +by the aggregation, leading to incorrect results. + +For that reason, an error is thrown if a whole multiple of the configured interval isn't found. +********************************** Because the RollupSearch endpoint can "upsample" intervals, there is no need to configure jobs with multiple intervals (hourly, daily, etc). It's recommended to just configure a single job with the smallest granularity that is needed, and allow the search endpoint to upsample diff --git a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc index eb8e985a65b..06e70b03673 100644 --- a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc @@ -77,7 +77,17 @@ bin/elasticsearch-keystore add xpack.security.http.ssl.secure_key_passphrase . Restart {es}. -NOTE: All TLS-related node settings are considered to be highly sensitive and +[NOTE] +=============================== +* All TLS-related node settings are considered to be highly sensitive and therefore are not exposed via the {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. For more information about any of these settings, see <>. + +* {es} monitors all files such as certificates, keys, keystores, or truststores +that are configured as values of TLS-related node settings. If you update any of +these files (for example, when your hostnames change or your certificates are +due to expire), {es} reloads them. The files are polled for changes at +a frequency determined by the global {es} `resource.reload.interval.high` +setting, which defaults to 5 seconds. +=============================== diff --git a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc index c186aebbe24..c2306545536 100644 --- a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc @@ -95,7 +95,17 @@ vice-versa). After enabling TLS you must restart all nodes in order to maintain communication across the cluster. -- -NOTE: All TLS-related node settings are considered to be highly sensitive and +[NOTE] +=============================== +* All TLS-related node settings are considered to be highly sensitive and therefore are not exposed via the {ref}/cluster-nodes-info.html#cluster-nodes-info[nodes info API]. For more information about any of these settings, see <>. + +* {es} monitors all files such as certificates, keys, keystores, or truststores +that are configured as values of TLS-related node settings. If you update any of +these files (for example, when your hostnames change or your certificates are +due to expire), {es} reloads them. The files are polled for changes at +a frequency determined by the global {es} `resource.reload.interval.high` +setting, which defaults to 5 seconds. 
+===============================
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
new file mode 100644
index 00000000000..ee0c0de97e0
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
@@ -0,0 +1,385 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexing;
+
+import org.apache.log4j.Logger;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * An abstract class that builds an index incrementally. A background job can be launched using {@link #maybeTriggerAsyncJob(long)};
+ * it will create the index from the source index up to the last complete bucket that is allowed to be built (based on job position).
+ * Only one background job can run simultaneously and {@link #onFinish()} is called when the job
+ * finishes. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is
+ * aborted while a job is running. The indexer must be started ({@link #start()}) to allow a background job to run when
+ * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer.
+ *
+ * In a nutshell, this is a two-cycle engine: first it sends a query, then it indexes documents based on the response, sends the next
+ * query, indexes again, and so on, until a condition lets the engine pause until the source provides new input.
+ *
+ * @param <JobPosition> Type that defines a job position, to be defined by the implementation.
+ */
+public abstract class AsyncTwoPhaseIndexer<JobPosition, JobStats extends IndexerJobStats> {
+    private static final Logger logger = Logger.getLogger(AsyncTwoPhaseIndexer.class.getName());
+
+    private final JobStats stats;
+
+    private final AtomicReference<IndexerState> state;
+    private final AtomicReference<JobPosition> position;
+    private final Executor executor;
+
+    protected AsyncTwoPhaseIndexer(Executor executor, AtomicReference<IndexerState> initialState,
+                                   JobPosition initialPosition, JobStats jobStats) {
+        this.executor = executor;
+        this.state = initialState;
+        this.position = new AtomicReference<>(initialPosition);
+        this.stats = jobStats;
+    }
+
+    /**
+     * Get the current state of the indexer.
+     */
+    public IndexerState getState() {
+        return state.get();
+    }
+
+    /**
+     * Get the current position of the indexer.
+     */
+    public JobPosition getPosition() {
+        return position.get();
+    }
+
+    /**
+     * Get the stats of this indexer.
+     */
+    public JobStats getStats() {
+        return stats;
+    }
+
+    /**
+     * Sets the internal state to {@link IndexerState#STARTED} if the previous state
+     * was {@link IndexerState#STOPPED}. Setting the state to STARTED allows a job
+     * to run in the background when {@link #maybeTriggerAsyncJob(long)} is called.
+     *
+     * @return The new state for the indexer (STARTED, INDEXING or ABORTING if the
+     * job was already aborted).
+     */
+    public synchronized IndexerState start() {
+        state.compareAndSet(IndexerState.STOPPED, IndexerState.STARTED);
+        return state.get();
+    }
+
+    /**
+     * Sets the internal state to {@link IndexerState#STOPPING} if an async job is
+     * running in the background and in such case {@link #onFinish()} will be called
+     * as soon as the background job detects that the indexer is stopped. If there
+     * is no job running when this function is called, the state is directly set to
+     * {@link IndexerState#STOPPED} and {@link #onFinish()} will never be called.
+     *
+     * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the
+     * job was already aborted).
+     */
+    public synchronized IndexerState stop() {
+        IndexerState currentState = state.updateAndGet(previousState -> {
+            if (previousState == IndexerState.INDEXING) {
+                return IndexerState.STOPPING;
+            } else if (previousState == IndexerState.STARTED) {
+                return IndexerState.STOPPED;
+            } else {
+                return previousState;
+            }
+        });
+        return currentState;
+    }
+
+    /**
+     * Sets the internal state to {@link IndexerState#ABORTING}. It returns false if
+     * an async job is running in the background and in such case {@link #onAbort}
+     * will be called as soon as the background job detects that the indexer is
+     * aborted. If there is no job running when this function is called, it returns
+     * true and {@link #onAbort()} will never be called.
+     *
+     * @return true if the indexer is aborted, false if a background job is running
+     * and abort is delayed.
+     */
+    public synchronized boolean abort() {
+        IndexerState prevState = state.getAndUpdate((prev) -> IndexerState.ABORTING);
+        return prevState == IndexerState.STOPPED || prevState == IndexerState.STARTED;
+    }
+
+    /**
+     * Triggers a background job that builds the index asynchronously iff
+     * no other job is currently running and the indexer is started
+     * ({@link IndexerState#STARTED}).
+     *
+     * @param now
+     *            The current time in milliseconds (used to limit the job to
+     *            complete buckets)
+     * @return true if a job has been triggered, false otherwise
+     */
+    public synchronized boolean maybeTriggerAsyncJob(long now) {
+        final IndexerState currentState = state.get();
+        switch (currentState) {
+        case INDEXING:
+        case STOPPING:
+        case ABORTING:
+            logger.warn("Schedule was triggered for job [" + getJobId() + "], but prior indexer is still running.");
+            return false;
+
+        case STOPPED:
+            logger.debug("Schedule was triggered for job [" + getJobId() + "] but job is stopped. Ignoring trigger.");
+            return false;
+
+        case STARTED:
+            logger.debug("Schedule was triggered for job [" + getJobId() + "], state: [" + currentState + "]");
+            stats.incrementNumInvocations(1);
+            onStartJob(now);
+
+            if (state.compareAndSet(IndexerState.STARTED, IndexerState.INDEXING)) {
+                // fire off the search. Note this is async, the method will return from here
+                executor.execute(() -> doNextSearch(buildSearchRequest(),
+                        ActionListener.wrap(this::onSearchResponse, exc -> finishWithFailure(exc))));
+                logger.debug("Beginning to index [" + getJobId() + "], state: [" + currentState + "]");
+                return true;
+            } else {
+                logger.debug("Could not move from STARTED to INDEXING state because current state is [" + state.get() + "]");
+                return false;
+            }
+
+        default:
+            logger.warn("Encountered unexpected state [" + currentState + "] while indexing");
+            throw new IllegalStateException("Job encountered an illegal state [" + currentState + "]");
+        }
+    }
+
+    /**
+     * Called to get the Id of the job, used for logging.
+     *
+     * @return a string with the id of the job
+     */
+    protected abstract String getJobId();
+
+    /**
+     * Called to process a response from one search request, in order to turn it into a {@link IterationResult}.
+     *
+     * @param searchResponse response from the search phase.
+     * @return Iteration object to be passed to the indexing phase.
+     */
+    protected abstract IterationResult<JobPosition> doProcess(SearchResponse searchResponse);
+
+    /**
+     * Called to build the next search request.
+     *
+     * @return SearchRequest to be passed to the search phase.
+     */
+    protected abstract SearchRequest buildSearchRequest();
+
+    /**
+     * Called at startup, after the job has been triggered using {@link #maybeTriggerAsyncJob(long)} and the
+     * internal state is {@link IndexerState#STARTED}.
+     *
+     * @param now The current time in milliseconds passed through from {@link #maybeTriggerAsyncJob(long)}
+     */
+    protected abstract void onStartJob(long now);
+
+    /**
+     * Executes the {@link SearchRequest} and calls nextPhase with the
+     * response or the exception if an error occurs.
+     *
+     * @param request
+     *            The search request to execute
+     * @param nextPhase
+     *            Listener for the next phase
+     */
+    protected abstract void doNextSearch(SearchRequest request, ActionListener<SearchResponse> nextPhase);
+
+    /**
+     * Executes the {@link BulkRequest} and calls nextPhase with the
+     * response or the exception if an error occurs.
+     *
+     * @param request
+     *            The bulk request to execute
+     * @param nextPhase
+     *            Listener for the next phase
+     */
+    protected abstract void doNextBulk(BulkRequest request, ActionListener<BulkResponse> nextPhase);
+
+    /**
+     * Called periodically during the execution of a background job. Implementations
+     * should persist the state somewhere and continue the execution asynchronously
+     * using next.
+     *
+     * @param state
+     *            The current state of the indexer
+     * @param position
+     *            The current position of the indexer
+     * @param next
+     *            Runnable for the next phase
+     */
+    protected abstract void doSaveState(IndexerState state, JobPosition position, Runnable next);
+
+    /**
+     * Called when a failure occurs in an async job causing the execution to stop.
+     *
+     * @param exc
+     *            The exception
+     */
+    protected abstract void onFailure(Exception exc);
+
+    /**
+     * Called when a background job finishes.
+     */
+    protected abstract void onFinish();
+
+    /**
+     * Called when a background job detects that the indexer is aborted, causing the
+     * async execution to stop.
+     */
+    protected abstract void onAbort();
+
+    private void finishWithFailure(Exception exc) {
+        doSaveState(finishAndSetState(), position.get(), () -> onFailure(exc));
+    }
+
+    private IndexerState finishAndSetState() {
+        return state.updateAndGet(prev -> {
+            switch (prev) {
+            case INDEXING:
+                // ready for another job
+                return IndexerState.STARTED;
+
+            case STOPPING:
+                // must be started again
+                return IndexerState.STOPPED;
+
+            case ABORTING:
+                // abort and exit
+                onAbort();
+                return IndexerState.ABORTING; // This shouldn't matter, since onAbort() will kill the task first
+
+            case STOPPED:
+                // No-op. Shouldn't really be possible to get here (should have to go through
+                // STOPPING first, which will be handled) but it is harmless to no-op and we
+                // don't want to throw an exception here
+                return IndexerState.STOPPED;
+
+            default:
+                // any other state is unanticipated at this point
+                throw new IllegalStateException("Indexer job encountered an illegal state [" + prev + "]");
+            }
+        });
+    }
+
+    private void onSearchResponse(SearchResponse searchResponse) {
+        try {
+            if (checkState(getState()) == false) {
+                return;
+            }
+            if (searchResponse.getShardFailures().length != 0) {
+                throw new RuntimeException("Shard failures encountered while running indexer for job [" + getJobId() + "]: "
+                        + Arrays.toString(searchResponse.getShardFailures()));
+            }
+
+            stats.incrementNumPages(1);
+            IterationResult<JobPosition> iterationResult = doProcess(searchResponse);
+
+            if (iterationResult.isDone()) {
+                logger.debug("Finished indexing for job [" + getJobId() + "], saving state and shutting down.");
+
+                // Change state first, then try to persist. This prevents an in-progress
+                // STOPPING/ABORTING from being persisted as STARTED but then stopping the job
+                doSaveState(finishAndSetState(), position.get(), this::onFinish);
+                return;
+            }
+
+            final List<IndexRequest> docs = iterationResult.getToIndex();
+            final BulkRequest bulkRequest = new BulkRequest();
+            docs.forEach(bulkRequest::add);
+
+            // TODO this might be a valid case, e.g. if the implementation filters
+            assert bulkRequest.requests().size() > 0;
+
+            doNextBulk(bulkRequest, ActionListener.wrap(bulkResponse -> {
+                // TODO we should check the items in the response and move the position
+                // accordingly to resume the failing buckets?
+                if (bulkResponse.hasFailures()) {
+                    logger.warn("Error while attempting to bulk index documents: " + bulkResponse.buildFailureMessage());
+                }
+                stats.incrementNumOutputDocuments(bulkResponse.getItems().length);
+                if (checkState(getState()) == false) {
+                    return;
+                }
+
+                JobPosition newPosition = iterationResult.getPosition();
+                position.set(newPosition);
+
+                onBulkResponse(bulkResponse, newPosition);
+            }, exc -> finishWithFailure(exc)));
+        } catch (Exception e) {
+            finishWithFailure(e);
+        }
+    }
+
+    private void onBulkResponse(BulkResponse response, JobPosition position) {
+        try {
+
+            ActionListener<SearchResponse> listener = ActionListener.wrap(this::onSearchResponse, this::finishWithFailure);
+            // TODO probably something more intelligent than every-50 is needed
+            if (stats.getNumPages() > 0 && stats.getNumPages() % 50 == 0) {
+                doSaveState(IndexerState.INDEXING, position, () -> doNextSearch(buildSearchRequest(), listener));
+            } else {
+                doNextSearch(buildSearchRequest(), listener);
+            }
+        } catch (Exception e) {
+            finishWithFailure(e);
+        }
+    }
+
+    /**
+     * Checks the {@link IndexerState} and returns false if the execution should be
+     * stopped.
+     */
+    private boolean checkState(IndexerState currentState) {
+        switch (currentState) {
+        case INDEXING:
+            // normal state
+            return true;
+
+        case STOPPING:
+            logger.info("Indexer job encountered [" + IndexerState.STOPPING + "] state, halting indexer.");
+            doSaveState(finishAndSetState(), getPosition(), () -> {
+            });
+            return false;
+
+        case STOPPED:
+            return false;
+
+        case ABORTING:
+            logger.info("Requested shutdown of indexer for job [" + getJobId() + "]");
+            onAbort();
+            return false;
+
+        default:
+            // Anything other than indexing, aborting or stopping is unanticipated
+            logger.warn("Encountered unexpected state [" + currentState + "] while indexing");
+            throw new IllegalStateException("Indexer job encountered an illegal state [" + currentState + "]");
+        }
+    }
+
+}
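To make the lifecycle implemented above easier to follow, here is a small standalone model (an editor's sketch, not part of the patch). It replays only the `AtomicReference`-based transitions described in the javadoc of `start()`, `stop()` and `maybeTriggerAsyncJob(long)`; the class and enum here are made up for illustration.

[source,java]
----
import java.util.concurrent.atomic.AtomicReference;

// Standalone model of the flow STOPPED -> STARTED -> INDEXING -> STOPPING -> STOPPED.
// It mirrors the transition logic above but is illustrative only.
class IndexerStateDemo {
    enum State { STOPPED, STARTED, INDEXING, STOPPING, ABORTING }

    public static void main(String[] args) {
        AtomicReference<State> state = new AtomicReference<>(State.STOPPED);

        // start(): STOPPED -> STARTED
        state.compareAndSet(State.STOPPED, State.STARTED);

        // maybeTriggerAsyncJob(): only a STARTED indexer moves to INDEXING
        boolean triggered = state.compareAndSet(State.STARTED, State.INDEXING);

        // stop() while a job runs: INDEXING -> STOPPING (not directly STOPPED)
        state.updateAndGet(prev -> prev == State.INDEXING ? State.STOPPING : prev);

        // the background job notices STOPPING and finishes: STOPPING -> STOPPED
        state.updateAndGet(prev -> prev == State.STOPPING ? State.STOPPED : prev);

        System.out.println("triggered=" + triggered + ", final=" + state.get()); // triggered=true, final=STOPPED
    }
}
----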
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java
new file mode 100644
index 00000000000..2453504a5ba
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.indexing;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * This class holds the runtime statistics of a job. The stats are not used by any internal process
+ * and are only for external monitoring/reference. Statistics are not persisted with the job, so if the
+ * allocated task is shut down/restarted on a different node, all the stats will reset.
+ */
+public abstract class IndexerJobStats implements ToXContentObject, Writeable {
+
+    public static final ParseField NAME = new ParseField("job_stats");
+
+    protected long numPages = 0;
+    protected long numInputDocuments = 0;
+    protected long numOuputDocuments = 0;
+    protected long numInvocations = 0;
+
+    public IndexerJobStats() {
+    }
+
+    public IndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations) {
+        this.numPages = numPages;
+        this.numInputDocuments = numInputDocuments;
+        this.numOuputDocuments = numOuputDocuments;
+        this.numInvocations = numInvocations;
+    }
+
+    public IndexerJobStats(StreamInput in) throws IOException {
+        this.numPages = in.readVLong();
+        this.numInputDocuments = in.readVLong();
+        this.numOuputDocuments = in.readVLong();
+        this.numInvocations = in.readVLong();
+    }
+
+    public long getNumPages() {
+        return numPages;
+    }
+
+    public long getNumDocuments() {
+        return numInputDocuments;
+    }
+
+    public long getNumInvocations() {
+        return numInvocations;
+    }
+
+    public long getOutputDocuments() {
+        return numOuputDocuments;
+    }
+
+    public void incrementNumPages(long n) {
+        assert (n >= 0);
+        numPages += n;
+    }
+
+    public void incrementNumDocuments(long n) {
+        assert (n >= 0);
+        numInputDocuments += n;
+    }
+
+    public void incrementNumInvocations(long n) {
+        assert (n >= 0);
+        numInvocations += n;
+    }
+
+    public void incrementNumOutputDocuments(long n) {
+        assert (n >= 0);
+        numOuputDocuments += n;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeVLong(numPages);
+        out.writeVLong(numInputDocuments);
+        out.writeVLong(numOuputDocuments);
+        out.writeVLong(numInvocations);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || getClass() != other.getClass()) {
+            return false;
+        }
+
+        IndexerJobStats that = (IndexerJobStats) other;
+
+        return Objects.equals(this.numPages, that.numPages)
+                && Objects.equals(this.numInputDocuments, that.numInputDocuments)
+                && Objects.equals(this.numOuputDocuments, that.numOuputDocuments)
+                && Objects.equals(this.numInvocations, that.numInvocations);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(numPages, numInputDocuments, numOuputDocuments, numInvocations);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java
similarity index 97%
rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java
rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java
index 6e211c1df9e..1b6b9a943cb 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/IndexerState.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerState.java
@@ -3,7 +3,7 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
-package org.elasticsearch.xpack.core.rollup.job;
+package org.elasticsearch.xpack.core.indexing;
 
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java
new file mode 100644
index 00000000000..1261daf185b
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IterationResult.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexing;
+
+import org.elasticsearch.action.index.IndexRequest;
+
+import java.util.List;
+
+/**
+ * Result object to hold the result of one iteration of iterative indexing.
+ * Acts as an interface between the implementation and the generic indexer.
+ */
+public class IterationResult<JobPosition> {
+
+    private final boolean isDone;
+    private final JobPosition position;
+    private final List<IndexRequest> toIndex;
+
+    /**
+     * Constructor for the result of one iteration.
+     *
+     * @param toIndex the list of requests to be indexed
+     * @param position the extracted, persistable position of the job required for the search phase
+     * @param isDone true if the source is exhausted and the job should go to sleep
+     *
+     * Note: toIndex.isEmpty() does not imply isDone, due to possible filtering in the specific implementation
+     */
+    public IterationResult(List<IndexRequest> toIndex, JobPosition position, boolean isDone) {
+        this.toIndex = toIndex;
+        this.position = position;
+        this.isDone = isDone;
+    }
+
+    /**
+     * Returns true if this indexing iteration is done and the job should go into sleep mode.
+     */
+    public boolean isDone() {
+        return isDone;
+    }
+
+    /**
+     * Returns the position of the job, a generic type to be passed to the next query construction.
+     *
+     * @return the position
+     */
+    public JobPosition getPosition() {
+        return position;
+    }
+
+    /**
+     * List of requests to be passed to bulk indexing.
+     *
+     * @return List of index requests.
+     */
+    public List<IndexRequest> getToIndex() {
+        return toIndex;
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java
index 50f79315085..7bbbf07e6dc 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java
@@ -26,8 +26,8 @@ import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.xpack.core.rollup.RollupField;
+import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats;
 import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
-import org.elasticsearch.xpack.core.rollup.job.RollupJobStats;
 import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus;
 
 import java.io.IOException;
@@ -174,7 +174,14 @@ public class GetRollupJobsAction extends Action {
         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             builder.startObject();
-            builder.field(JOBS.getPreferredName(), jobs);
+
+            // XContentBuilder does not support passing the params object for Iterables
+            builder.field(JOBS.getPreferredName());
+            builder.startArray();
+            for (JobWrapper job : jobs) {
+                job.toXContent(builder, params);
+            }
+            builder.endArray();
             builder.endObject();
             return builder;
         }
@@ -204,20 +211,20 @@ public class GetRollupJobsAction extends Action {
 
     public static class JobWrapper implements Writeable, ToXContentObject {
         private final RollupJobConfig job;
-        private final RollupJobStats stats;
+        private final RollupIndexerJobStats stats;
         private final RollupJobStatus status;
 
         public static final ConstructingObjectParser<JobWrapper, Void> PARSER = new ConstructingObjectParser<>(NAME, a -> new JobWrapper((RollupJobConfig) a[0],
-                (RollupJobStats) a[1], (RollupJobStatus)a[2]));
+                (RollupIndexerJobStats) a[1], (RollupJobStatus)a[2]));
         static {
             PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> RollupJobConfig.fromXContent(p, null), CONFIG);
-            PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStats.PARSER::apply, STATS);
+            PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupIndexerJobStats.PARSER::apply, STATS);
             PARSER.declareObject(ConstructingObjectParser.constructorArg(), RollupJobStatus.PARSER::apply, STATUS);
         }
 
-        public JobWrapper(RollupJobConfig job, RollupJobStats stats, RollupJobStatus status) {
+        public JobWrapper(RollupJobConfig job, RollupIndexerJobStats stats, RollupJobStatus status) {
             this.job = job;
             this.stats = stats;
             this.status = status;
@@ -225,7 +232,7 @@ public class GetRollupJobsAction extends Action {
 
         public JobWrapper(StreamInput in) throws IOException {
             this.job = new RollupJobConfig(in);
-            this.stats = new RollupJobStats(in);
+            this.stats = new RollupIndexerJobStats(in);
             this.status = new RollupJobStatus(in);
         }
 
@@ -240,7 +247,7 @@ public class GetRollupJobsAction extends Action {
             return job;
         }
 
-        public RollupJobStats getStats() {
+        public RollupIndexerJobStats getStats() {
             return stats;
         }
 
@@ -254,7 +261,7 @@ public class GetRollupJobsAction extends Action {
             builder.field(CONFIG.getPreferredName());
             job.toXContent(builder, params);
             builder.field(STATUS.getPreferredName(), status);
-            builder.field(STATS.getPreferredName(), stats);
+            builder.field(STATS.getPreferredName(), stats, params);
             builder.endObject();
             return builder;
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java
new file mode 100644
index 00000000000..87915671b79
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupIndexerJobStats.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.core.rollup.job;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.xpack.core.indexing.IndexerJobStats;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * The Rollup specialization of stats for the AsyncTwoPhaseIndexer.
+ * Note: instead of `documents_indexed`, this XContent shows `rollups_indexed`.
+ */
+public class RollupIndexerJobStats extends IndexerJobStats {
+    private static ParseField NUM_PAGES = new ParseField("pages_processed");
+    private static ParseField NUM_INPUT_DOCUMENTS = new ParseField("documents_processed");
+    private static ParseField NUM_OUTPUT_DOCUMENTS = new ParseField("rollups_indexed");
+    private static ParseField NUM_INVOCATIONS = new ParseField("trigger_count");
+
+    public static final ConstructingObjectParser<RollupIndexerJobStats, Void> PARSER =
+            new ConstructingObjectParser<>(NAME.getPreferredName(),
+                    args -> new RollupIndexerJobStats((long) args[0], (long) args[1], (long) args[2], (long) args[3]));
+
+    static {
+        PARSER.declareLong(constructorArg(), NUM_PAGES);
+        PARSER.declareLong(constructorArg(), NUM_INPUT_DOCUMENTS);
+        PARSER.declareLong(constructorArg(), NUM_OUTPUT_DOCUMENTS);
+        PARSER.declareLong(constructorArg(), NUM_INVOCATIONS);
+    }
+
+    public RollupIndexerJobStats() {
+        super();
+    }
+
+    public RollupIndexerJobStats(long numPages, long numInputDocuments, long numOuputDocuments, long numInvocations) {
+        super(numPages, numInputDocuments, numOuputDocuments, numInvocations);
+    }
+
+    public RollupIndexerJobStats(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(NUM_PAGES.getPreferredName(), numPages);
+        builder.field(NUM_INPUT_DOCUMENTS.getPreferredName(), numInputDocuments);
+        builder.field(NUM_OUTPUT_DOCUMENTS.getPreferredName(), numOuputDocuments);
+        builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations);
+        builder.endObject();
+        return builder;
+    }
+
+    public static RollupIndexerJobStats fromXContent(XContentParser parser) {
+        try {
+            return PARSER.parse(parser, null);
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java
deleted file mode 100644
index 06cfb520af5..00000000000
---
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStats.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.core.rollup.job; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Objects; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; - -/** - * This class holds the runtime statistics of a job. The stats are not used by any internal process - * and are only for external monitoring/reference. Statistics are not persisted with the job, so if the - * allocated task is shutdown/restarted on a different node all the stats will reset. - */ -public class RollupJobStats implements ToXContentObject, Writeable { - - public static final ParseField NAME = new ParseField("job_stats"); - - private static ParseField NUM_PAGES = new ParseField("pages_processed"); - private static ParseField NUM_DOCUMENTS = new ParseField("documents_processed"); - private static ParseField NUM_ROLLUPS = new ParseField("rollups_indexed"); - private static ParseField NUM_INVOCATIONS = new ParseField("trigger_count"); - - private long numPages = 0; - private long numDocuments = 0; - private long numRollups = 0; - private long numInvocations = 0; - - public static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>(NAME.getPreferredName(), - args -> new RollupJobStats((long) args[0], (long) args[1], (long) args[2], (long) args[3])); - - static { - PARSER.declareLong(constructorArg(), NUM_PAGES); - PARSER.declareLong(constructorArg(), NUM_DOCUMENTS); - PARSER.declareLong(constructorArg(), NUM_ROLLUPS); - PARSER.declareLong(constructorArg(), NUM_INVOCATIONS); - } - - public RollupJobStats() { - } - - public RollupJobStats(long numPages, long numDocuments, long numRollups, long numInvocations) { - this.numPages = numPages; - this.numDocuments = numDocuments; - this.numRollups = numRollups; - this.numInvocations = numInvocations; - } - - public RollupJobStats(StreamInput in) throws IOException { - this.numPages = in.readVLong(); - this.numDocuments = in.readVLong(); - this.numRollups = in.readVLong(); - this.numInvocations = in.readVLong(); - } - - public long getNumPages() { - return numPages; - } - - public long getNumDocuments() { - return numDocuments; - } - - public long getNumInvocations() { - return numInvocations; - } - - public long getNumRollups() { - return numRollups; - } - - public void incrementNumPages(long n) { - assert(n >= 0); - numPages += n; - } - - public void incrementNumDocuments(long n) { - assert(n >= 0); - numDocuments += n; - } - - public void incrementNumInvocations(long n) { - assert(n >= 0); - numInvocations += n; - } - - public void incrementNumRollups(long n) { - assert(n >= 0); - numRollups += n; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - 
out.writeVLong(numPages); - out.writeVLong(numDocuments); - out.writeVLong(numRollups); - out.writeVLong(numInvocations); - } - - public static RollupJobStats fromXContent(XContentParser parser) { - try { - return PARSER.parse(parser, null); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(NUM_PAGES.getPreferredName(), numPages); - builder.field(NUM_DOCUMENTS.getPreferredName(), numDocuments); - builder.field(NUM_ROLLUPS.getPreferredName(), numRollups); - builder.field(NUM_INVOCATIONS.getPreferredName(), numInvocations); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - - if (other == null || getClass() != other.getClass()) { - return false; - } - - RollupJobStats that = (RollupJobStats) other; - - return Objects.equals(this.numPages, that.numPages) - && Objects.equals(this.numDocuments, that.numDocuments) - && Objects.equals(this.numRollups, that.numRollups) - && Objects.equals(this.numInvocations, that.numInvocations); - } - - @Override - public int hashCode() { - return Objects.hash(numPages, numDocuments, numRollups, numInvocations); - } - -} - diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java index 640385c9c80..0a2f046907c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.tasks.Task; +import org.elasticsearch.xpack.core.indexing.IndexerState; import java.io.IOException; import java.util.HashMap; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java index 4d57da06b92..ed31f0cc020 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenRequest.java @@ -186,7 +186,7 @@ public final class CreateTokenRequest extends ActionRequest { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().before(Version.V_7_0_0_alpha1) && GrantType.CLIENT_CREDENTIALS.getValue().equals(grantType)) { + if (out.getVersion().before(Version.V_6_5_0) && GrantType.CLIENT_CREDENTIALS.getValue().equals(grantType)) { throw new IllegalArgumentException("a request with the client_credentials grant_type cannot be sent to version [" + out.getVersion() + "]"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java index 43924735678..30111a92431 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java
@@ -59,7 +59,7 @@ public final class CreateTokenResponse extends ActionResponse implements ToXCont
         out.writeString(tokenString);
         out.writeTimeValue(expiresIn);
         out.writeOptionalString(scope);
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { // TODO change to V_6_5_0 after backport
+        if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
             out.writeOptionalString(refreshToken);
         } else if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
             if (refreshToken == null) {
@@ -76,7 +76,7 @@ public final class CreateTokenResponse extends ActionResponse implements ToXCont
         tokenString = in.readString();
         expiresIn = in.readTimeValue();
         scope = in.readOptionalString();
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { // TODO change to V_6_5_0 after backport
+        if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
             refreshToken = in.readOptionalString();
         } else if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
             refreshToken = in.readString();
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
new file mode 100644
index 00000000000..2662e05570c
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.indexing;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchResponseSections;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class AsyncTwoPhaseIndexerTests extends ESTestCase {
+
+    AtomicBoolean isFinished = new AtomicBoolean(false);
+
+    private class MockIndexer extends AsyncTwoPhaseIndexer<Integer, MockJobStats> {
+
+        // test the execution order
+        private int step;
+
+        protected MockIndexer(Executor executor, AtomicReference<IndexerState> initialState, Integer initialPosition) {
+            super(executor, initialState, initialPosition, new MockJobStats());
+        }
+
+        @Override
+        protected String getJobId() {
+            return "mock";
+        }
+
+        @Override
+        protected IterationResult<Integer> doProcess(SearchResponse searchResponse) {
+            assertThat(step, equalTo(3));
+            ++step;
+            return new IterationResult<>(Collections.emptyList(), 3, true);
+        }
+
+        @Override
+        protected SearchRequest buildSearchRequest() {
+            assertThat(step, equalTo(1));
+            ++step;
+            return null;
+        }
+
+        @Override
+        protected void onStartJob(long now) {
+            assertThat(step, equalTo(0));
+            ++step;
+        }
+
+        @Override
+        protected void doNextSearch(SearchRequest request, ActionListener<SearchResponse> nextPhase) {
+            assertThat(step, equalTo(2));
+            ++step;
+            final SearchResponseSections sections = new SearchResponseSections(new SearchHits(new SearchHit[0], 0, 0), null, null, false,
+                    null, null, 1);
+
+            nextPhase.onResponse(new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null));
+        }
+
+        @Override
+        protected void doNextBulk(BulkRequest request, ActionListener<BulkResponse> nextPhase) {
+            fail("should not be called");
+        }
+
+        @Override
+        protected void doSaveState(IndexerState state, Integer position, Runnable next) {
+            assertThat(step, equalTo(4));
+            ++step;
+            next.run();
+        }
+
+        @Override
+        protected void onFailure(Exception exc) {
+            fail(exc.getMessage());
+        }
+
+        @Override
+        protected void onFinish() {
+            assertThat(step, equalTo(5));
+            ++step;
+            isFinished.set(true);
+        }
+
+        @Override
+        protected void onAbort() {
+        }
+
+        public int getStep() {
+            return step;
+        }
+
+    }
+
+    private static class MockJobStats extends IndexerJobStats {
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            return null;
+        }
+    }
+
+    public void testStateMachine() throws InterruptedException {
+        AtomicReference<IndexerState> state = new AtomicReference<>(IndexerState.STOPPED);
+        final ExecutorService executor = Executors.newFixedThreadPool(1);
+
+        try {
+
+            MockIndexer indexer = new MockIndexer(executor, state, 2);
+            indexer.start();
+            assertThat(indexer.getState(), equalTo(IndexerState.STARTED));
+            assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis()));
+            assertThat(indexer.getState(), equalTo(IndexerState.INDEXING));
+            assertThat(indexer.getPosition(), equalTo(2));
+            ESTestCase.awaitBusy(() -> isFinished.get());
+            assertThat(indexer.getStep(), equalTo(6));
+            assertThat(indexer.getStats().getNumInvocations(), equalTo(1L));
+            assertThat(indexer.getStats().getNumPages(), equalTo(1L));
+            assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L));
+            assertTrue(indexer.abort());
+        } finally {
+            executor.shutdownNow();
+        }
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java
similarity index 98%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java
index ec17a37e23b..329800c2f1a 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/IndexerStateEnumTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/IndexerStateEnumTests.java
@@ -3,7 +3,7 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
*/ -package org.elasticsearch.xpack.core.rollup.job; +package org.elasticsearch.xpack.core.indexing; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java index a0df63bc38d..1ab6e6a55d4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/JobWrapperSerializingTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.rollup.job; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction; @@ -40,7 +41,8 @@ public class JobWrapperSerializingTests extends AbstractSerializingTestCase { + + @Override + protected RollupIndexerJobStats createTestInstance() { + return randomStats(); + } + + @Override + protected Writeable.Reader instanceReader() { + return RollupIndexerJobStats::new; + } + + @Override + protected RollupIndexerJobStats doParseInstance(XContentParser parser) { + return RollupIndexerJobStats.fromXContent(parser); + } + + public static RollupIndexerJobStats randomStats() { + return new RollupIndexerJobStats(randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong()); + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java deleted file mode 100644 index 0091b21dc40..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatsTests.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.core.rollup.job; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; - -public class RollupJobStatsTests extends AbstractSerializingTestCase { - - @Override - protected RollupJobStats createTestInstance() { - return randomStats(); - } - - @Override - protected Writeable.Reader instanceReader() { - return RollupJobStats::new; - } - - @Override - protected RollupJobStats doParseInstance(XContentParser parser) { - return RollupJobStats.fromXContent(parser); - } - - public static RollupJobStats randomStats() { - return new RollupJobStats(randomNonNegativeLong(), randomNonNegativeLong(), - randomNonNegativeLong(), randomNonNegativeLong()); - } -} - diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java index 2c802a7e41d..f46bda788bf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatusTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.rollup.job; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.indexing.IndexerState; import java.util.HashMap; import java.util.Map; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index 3e36550e46f..df25b2fa126 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -78,7 +78,6 @@ public class SSLConfigurationReloaderTests extends ESTestCase { /** * Tests reloading a keystore that is used in the KeyManager of SSLContext */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32124") public void testReloadingKeyStore() throws Exception { assumeFalse("Can't run in a FIPS JVM", inFipsJvm()); final Path tempDir = createTempDir(); @@ -192,7 +191,6 @@ public class SSLConfigurationReloaderTests extends ESTestCase { * Tests the reloading of SSLContext when the trust store is modified. 
The same store is used as a TrustStore (for the
 * reloadable SSLContext used in the HTTPClient) and as a KeyStore for the MockWebServer
 */
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32124")
    public void testReloadingTrustStore() throws Exception {
        assumeFalse("Can't run in a FIPS JVM", inFipsJvm());
        Path tempDir = createTempDir();
@@ -479,7 +477,9 @@ public class SSLConfigurationReloaderTests extends ESTestCase {
        try (InputStream is = Files.newInputStream(keyStorePath)) {
            keyStore.load(is, keyStorePass.toCharArray());
        }
-        final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, keyStorePass.toCharArray())
+        // TODO Revisit TLS1.2 pinning when TLS1.3 is fully supported
+        // https://github.com/elastic/elasticsearch/issues/32276
+        final SSLContext sslContext = new SSLContextBuilder().useProtocol("TLSv1.2").loadKeyMaterial(keyStore, keyStorePass.toCharArray())
            .build();
        MockWebServer server = new MockWebServer(sslContext, false);
        server.enqueue(new MockResponse().setResponseCode(200).setBody("body"));
@@ -493,7 +493,9 @@ public class SSLConfigurationReloaderTests extends ESTestCase {
        keyStore.load(null, password.toCharArray());
        keyStore.setKeyEntry("testnode_ec", PemUtils.readPrivateKey(keyPath, password::toCharArray), password.toCharArray(),
            CertParsingUtils.readCertificates(Collections.singletonList(certPath)));
-        final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, password.toCharArray())
+        // TODO Revisit TLS1.2 pinning when TLS1.3 is fully supported
+        // https://github.com/elastic/elasticsearch/issues/32276
+        final SSLContext sslContext = new SSLContextBuilder().useProtocol("TLSv1.2").loadKeyMaterial(keyStore, password.toCharArray())
            .build();
        MockWebServer server = new MockWebServer(sslContext, false);
        server.enqueue(new MockResponse().setResponseCode(200).setBody("body"));
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java
index d1706fd708e..8537f2b6a38 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java
@@ -8,6 +8,7 @@ package org.elasticsearch.xpack.rollup;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
@@ -17,7 +18,9 @@ import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig;
 import org.joda.time.DateTimeZone;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -32,6 +35,29 @@ public class RollupJobIdentifierUtils {
 
     private static final Comparator<TimeValue> COMPARATOR = RollupJobIdentifierUtils.getComparator();
 
+    public static final Map<String, Integer> CALENDAR_ORDERING;
+
+    static {
+        Map<String, Integer> dateFieldUnits = new HashMap<>(16);
+        dateFieldUnits.put("year", 8);
dateFieldUnits.put("1y", 8); + dateFieldUnits.put("quarter", 7); + dateFieldUnits.put("1q", 7); + dateFieldUnits.put("month", 6); + dateFieldUnits.put("1M", 6); + dateFieldUnits.put("week", 5); + dateFieldUnits.put("1w", 5); + dateFieldUnits.put("day", 4); + dateFieldUnits.put("1d", 4); + dateFieldUnits.put("hour", 3); + dateFieldUnits.put("1h", 3); + dateFieldUnits.put("minute", 2); + dateFieldUnits.put("1m", 2); + dateFieldUnits.put("second", 1); + dateFieldUnits.put("1s", 1); + CALENDAR_ORDERING = Collections.unmodifiableMap(dateFieldUnits); + } + /** * Given the aggregation tree and a list of available job capabilities, this method will return a set * of the "best" jobs that should be searched. @@ -93,8 +119,9 @@ public class RollupJobIdentifierUtils { if (fieldCaps != null) { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { - TimeValue interval = TimeValue.parseTimeValue((String)agg.get(RollupField.INTERVAL), "date_histogram.interval"); - String thisTimezone = (String) agg.get(DateHistogramGroupConfig.TIME_ZONE); + DateHistogramInterval interval = new DateHistogramInterval((String)agg.get(RollupField.INTERVAL)); + + String thisTimezone = (String)agg.get(DateHistogramGroupConfig.TIME_ZONE); String sourceTimeZone = source.timeZone() == null ? DateTimeZone.UTC.toString() : source.timeZone().toString(); // Ensure we are working on the same timezone @@ -102,17 +129,20 @@ public class RollupJobIdentifierUtils { continue; } if (source.dateHistogramInterval() != null) { - TimeValue sourceInterval = TimeValue.parseTimeValue(source.dateHistogramInterval().toString(), - "source.date_histogram.interval"); - //TODO should be divisor of interval - if (interval.compareTo(sourceInterval) <= 0) { + // Check if both are calendar and validate if they are. + // If not, check if both are fixed and validate + if (validateCalendarInterval(source.dateHistogramInterval(), interval)) { + localCaps.add(cap); + } else if (validateFixedInterval(source.dateHistogramInterval(), interval)) { localCaps.add(cap); } } else { - if (interval.getMillis() <= source.interval()) { + // check if config is fixed and validate if it is + if (validateFixedInterval(source.interval(), interval)) { localCaps.add(cap); } } + // not a candidate if we get here break; } } @@ -133,6 +163,55 @@ public class RollupJobIdentifierUtils { } } + private static boolean isCalendarInterval(DateHistogramInterval interval) { + return DateHistogramAggregationBuilder.DATE_FIELD_UNITS.containsKey(interval.toString()); + } + + static boolean validateCalendarInterval(DateHistogramInterval requestInterval, + DateHistogramInterval configInterval) { + // Both must be calendar intervals + if (isCalendarInterval(requestInterval) == false || isCalendarInterval(configInterval) == false) { + return false; + } + + // The request must be gte the config. 
The CALENDAR_ORDERING map values are integers representing
+        // relative orders between the calendar units
+        int requestOrder = CALENDAR_ORDERING.getOrDefault(requestInterval.toString(), Integer.MAX_VALUE);
+        int configOrder = CALENDAR_ORDERING.getOrDefault(configInterval.toString(), Integer.MAX_VALUE);
+
+        // All calendar units are multiples naturally, so we just care about gte
+        return requestOrder >= configOrder;
+    }
+
+    static boolean validateFixedInterval(DateHistogramInterval requestInterval,
+                                         DateHistogramInterval configInterval) {
+        // Neither can be calendar intervals
+        if (isCalendarInterval(requestInterval) || isCalendarInterval(configInterval)) {
+            return false;
+        }
+
+        // Both are fixed, so we are good to convert to millis now
+        long configIntervalMillis = TimeValue.parseTimeValue(configInterval.toString(),
+            "date_histo.config.interval").getMillis();
+        long requestIntervalMillis = TimeValue.parseTimeValue(requestInterval.toString(),
+            "date_histo.request.interval").getMillis();
+
+        // Must be a multiple and gte the config
+        return requestIntervalMillis >= configIntervalMillis && requestIntervalMillis % configIntervalMillis == 0;
+    }
+
+    static boolean validateFixedInterval(long requestInterval, DateHistogramInterval configInterval) {
+        // config must not be a calendar interval
+        if (isCalendarInterval(configInterval)) {
+            return false;
+        }
+        long configIntervalMillis = TimeValue.parseTimeValue(configInterval.toString(),
+            "date_histo.config.interval").getMillis();
+
+        // Must be a multiple and gte the config
+        return requestInterval >= configIntervalMillis && requestInterval % configIntervalMillis == 0;
+    }
+
     /**
      * Find the set of histo's with the largest interval
     */
@@ -144,8 +223,8 @@ public class RollupJobIdentifierUtils {
             for (Map<String, Object> agg : fieldCaps.getAggs()) {
                 if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) {
                     Long interval = (long)agg.get(RollupField.INTERVAL);
-                    // TODO should be divisor of interval
-                    if (interval <= source.interval()) {
+                    // query interval must be gte the configured interval, and a whole multiple
+                    if (interval <= source.interval() && source.interval() % interval == 0) {
                         localCaps.add(cap);
                     }
                     break;
@@ -155,8 +234,8 @@ public class RollupJobIdentifierUtils {
         }
 
         if (localCaps.isEmpty()) {
-            throw new IllegalArgumentException("There is not a rollup job that has a [" + source.getWriteableName() + "] agg on field [" +
-                source.field() + "] which also satisfies all requirements of query.");
+            throw new IllegalArgumentException("There is not a rollup job that has a [" + source.getWriteableName()
+                + "] agg on field [" + source.field() + "] which also satisfies all requirements of query.");
         }
 
         // We are a leaf, save our best caps
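As a quick illustration of the calendar-interval rule implemented above (an editor's sketch, not part of the patch; the map below mirrors a slice of CALENDAR_ORDERING, and the class and helper names are made up):

[source,java]
----
import java.util.Map;

// A request's calendar unit must be at least as coarse as the configured one;
// calendar units are naturally multiples of each other, so ordering suffices.
class CalendarOrderDemo {
    static final Map<String, Integer> ORDER = Map.of("1h", 3, "1d", 4, "1w", 5);

    static boolean calendarOk(String request, String config) {
        return ORDER.getOrDefault(request, Integer.MAX_VALUE) >= ORDER.getOrDefault(config, Integer.MAX_VALUE);
    }

    public static void main(String[] args) {
        System.out.println(calendarOk("1w", "1d")); // true: weekly request on a daily job
        System.out.println(calendarOk("1h", "1d")); // false: finer than the rollup interval
    }
}
----

Fixed intervals, by contrast, additionally require the whole-multiple check shown in `validateFixedInterval` above, since they are not naturally multiples of each other.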
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java
index 9119a5445d4..94d64b17de8 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/IndexerUtils.java
@@ -17,7 +17,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggre
 import org.elasticsearch.xpack.core.rollup.RollupField;
 import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig;
 import org.elasticsearch.xpack.core.rollup.job.GroupConfig;
-import org.elasticsearch.xpack.core.rollup.job.RollupJobStats;
+import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats;
 import org.elasticsearch.xpack.rollup.Rollup;
 
 import java.util.ArrayList;
@@ -46,7 +46,7 @@ class IndexerUtils {
      * @param isUpgradedDocID `true` if this job is using the new ID scheme
      * @return A list of rolled documents derived from the response
      */
-    static List<IndexRequest> processBuckets(CompositeAggregation agg, String rollupIndex, RollupJobStats stats,
+    static List<IndexRequest> processBuckets(CompositeAggregation agg, String rollupIndex, RollupIndexerJobStats stats,
                                              GroupConfig groupConfig, String jobId, boolean isUpgradedDocID) {
 
         logger.debug("Buckets: [" + agg.getBuckets().size() + "][" + jobId + "]");
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java
index 6abb7ffa567..b1b052a3659 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java
@@ -5,11 +5,6 @@
  */
 package org.elasticsearch.xpack.rollup.job;
 
-import org.apache.log4j.Logger;
-import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.bulk.BulkRequest;
-import org.elasticsearch.action.bulk.BulkResponse;
-import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.unit.TimeValue;
@@ -33,20 +28,22 @@ import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggreg
 import org.elasticsearch.search.aggregations.support.ValueType;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.xpack.core.indexing.IndexerState;
+import org.elasticsearch.xpack.core.indexing.IterationResult;
+import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer;
 import org.elasticsearch.xpack.core.rollup.RollupField;
 import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig;
 import org.elasticsearch.xpack.core.rollup.job.GroupConfig;
 import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig;
-import org.elasticsearch.xpack.core.rollup.job.IndexerState;
 import org.elasticsearch.xpack.core.rollup.job.MetricConfig;
+import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats;
 import org.elasticsearch.xpack.core.rollup.job.RollupJob;
 import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
-import org.elasticsearch.xpack.core.rollup.job.RollupJobStats;
 import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
 import org.joda.time.DateTimeZone;
 
 import java.util.ArrayList;
-import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -54,30 +51,16 @@ import java.util.concurrent.Executor;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
-import static java.util.Collections.singletonList;
-import static java.util.Collections.unmodifiableList;
 import static org.elasticsearch.xpack.core.rollup.RollupField.formatFieldName;
 
 /**
A background job can be launched using {@link #maybeTriggerAsyncJob(long)}, - * it will create the rollup index from the source index up to the last complete bucket that is allowed to be built (based on the current - * time and the delay set on the rollup job). Only one background job can run simultaneously and {@link #onFinish()} is called when the job - * finishes. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is - * aborted while a job is running. The indexer must be started ({@link #start()} to allow a background job to run when - * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer. + * An abstract implementation of {@link AsyncTwoPhaseIndexer} that builds a rollup index incrementally. */ -public abstract class RollupIndexer { - private static final Logger logger = Logger.getLogger(RollupIndexer.class.getName()); - +public abstract class RollupIndexer extends AsyncTwoPhaseIndexer, RollupIndexerJobStats> { static final String AGGREGATION_NAME = RollupField.NAME; private final RollupJob job; - private final RollupJobStats stats; - private final AtomicReference state; - private final AtomicReference> position; - private final Executor executor; protected final AtomicBoolean upgradedDocumentID; - private final CompositeAggregationBuilder compositeBuilder; private long maxBoundary; @@ -87,84 +70,16 @@ public abstract class RollupIndexer { * @param job The rollup job * @param initialState Initial state for the indexer * @param initialPosition The last indexed bucket of the task + * @param upgradedDocumentID whether job has updated IDs (for BWC) */ - RollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, - Map initialPosition, AtomicBoolean upgradedDocumentID) { - this.executor = executor; + RollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, Map initialPosition, + AtomicBoolean upgradedDocumentID) { + super(executor, initialState, initialPosition, new RollupIndexerJobStats()); this.job = job; - this.stats = new RollupJobStats(); - this.state = initialState; - this.position = new AtomicReference<>(initialPosition); this.compositeBuilder = createCompositeBuilder(job.getConfig()); this.upgradedDocumentID = upgradedDocumentID; } - /** - * Executes the {@link SearchRequest} and calls nextPhase with the response - * or the exception if an error occurs. - * - * @param request The search request to execute - * @param nextPhase Listener for the next phase - */ - protected abstract void doNextSearch(SearchRequest request, ActionListener nextPhase); - - /** - * Executes the {@link BulkRequest} and calls nextPhase with the response - * or the exception if an error occurs. - * - * @param request The bulk request to execute - * @param nextPhase Listener for the next phase - */ - protected abstract void doNextBulk(BulkRequest request, ActionListener nextPhase); - - /** - * Called periodically during the execution of a background job. Implementation should - * persists the state somewhere and continue the execution asynchronously using next. - * - * @param state The current state of the indexer - * @param position The current position of the indexer - * @param next Runnable for the next phase - */ - protected abstract void doSaveState(IndexerState state, Map position, Runnable next); - - /** - * Called when a failure occurs in an async job causing the execution to stop. 
- * @param exc The exception - */ - protected abstract void onFailure(Exception exc); - - /** - * Called when a background job finishes. - */ - protected abstract void onFinish(); - - /** - * Called when a background job detects that the indexer is aborted causing the async execution - * to stop. - */ - protected abstract void onAbort(); - - /** - * Get the current state of the indexer. - */ - public IndexerState getState() { - return state.get(); - } - - /** - * Get the current position of the indexer. - */ - public Map getPosition() { - return position.get(); - } - - /** - * Get the stats of this indexer. - */ - public RollupJobStats getStats() { - return stats; - } - /** * Returns whether this job has upgraded its ID scheme yet or not */ @@ -172,229 +87,28 @@ public abstract class RollupIndexer { return upgradedDocumentID.get(); } - /** - * Sets the internal state to {@link IndexerState#STARTED} if the previous state was {@link IndexerState#STOPPED}. Setting the state to - * STARTED allows a job to run in the background when {@link #maybeTriggerAsyncJob(long)} is called. - * @return The new state for the indexer (STARTED, INDEXING or ABORTING if the job was already aborted). - */ - public synchronized IndexerState start() { - state.compareAndSet(IndexerState.STOPPED, IndexerState.STARTED); - return state.get(); + @Override + protected String getJobId() { + return job.getConfig().getId(); } - /** - * Sets the internal state to {@link IndexerState#STOPPING} if an async job is running in the background and in such case - * {@link #onFinish()} will be called as soon as the background job detects that the indexer is stopped. If there is no job running when - * this function is called, the state is directly set to {@link IndexerState#STOPPED} and {@link #onFinish()} will never be called. - * @return The new state for the indexer (STOPPED, STOPPING or ABORTING if the job was already aborted). - */ - public synchronized IndexerState stop() { - IndexerState currentState = state.updateAndGet(previousState -> { - if (previousState == IndexerState.INDEXING) { - return IndexerState.STOPPING; - } else if (previousState == IndexerState.STARTED) { - return IndexerState.STOPPED; - } else { - return previousState; - } - }); - return currentState; - } - - /** - * Sets the internal state to {@link IndexerState#ABORTING}. It returns false if an async job is running in the background and in such - * case {@link #onAbort} will be called as soon as the background job detects that the indexer is aborted. If there is no job running - * when this function is called, it returns true and {@link #onAbort()} will never be called. - * @return true if the indexer is aborted, false if a background job is running and abort is delayed. - */ - public synchronized boolean abort() { - IndexerState prevState = state.getAndUpdate((prev) -> IndexerState.ABORTING); - return prevState == IndexerState.STOPPED || prevState == IndexerState.STARTED; - } - - /** - * Triggers a background job that builds the rollup index asynchronously iff there is no other job that runs - * and the indexer is started ({@link IndexerState#STARTED}. 
- * - * @param now The current time in milliseconds (used to limit the job to complete buckets) - * @return true if a job has been triggered, false otherwise - */ - public synchronized boolean maybeTriggerAsyncJob(long now) { - final IndexerState currentState = state.get(); - switch (currentState) { - case INDEXING: - case STOPPING: - case ABORTING: - logger.warn("Schedule was triggered for rollup job [" + job.getConfig().getId() + "], but prior indexer is still running."); - return false; - - case STOPPED: - logger.debug("Schedule was triggered for rollup job [" + job.getConfig().getId() - + "] but job is stopped. Ignoring trigger."); - return false; - - case STARTED: - logger.debug("Schedule was triggered for rollup job [" + job.getConfig().getId() + "], state: [" + currentState + "]"); - // Only valid time to start indexing is when we are STARTED but not currently INDEXING. - stats.incrementNumInvocations(1); - - // rounds the current time to its current bucket based on the date histogram interval. - // this is needed to exclude buckets that can still receive new documents. - DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); - long rounded = dateHisto.createRounding().round(now); - if (dateHisto.getDelay() != null) { - // if the job has a delay we filter all documents that appear before it. - maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); - } else { - maxBoundary = rounded; - } - - if (state.compareAndSet(IndexerState.STARTED, IndexerState.INDEXING)) { - // fire off the search. Note this is async, the method will return from here - executor.execute(() -> doNextSearch(buildSearchRequest(), - ActionListener.wrap(this::onSearchResponse, exc -> finishWithFailure(exc)))); - logger.debug("Beginning to rollup [" + job.getConfig().getId() + "], state: [" + currentState + "]"); - return true; - } else { - logger.debug("Could not move from STARTED to INDEXING state because current state is [" + state.get() + "]"); - return false; - } - - default: - logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); - throw new IllegalStateException("Rollup job encountered an illegal state [" + currentState + "]"); + @Override + protected void onStartJob(long now) { + // this is needed to exclude buckets that can still receive new documents. + DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); + long rounded = dateHisto.createRounding().round(now); + if (dateHisto.getDelay() != null) { + // if the job has a delay we filter all documents that appear before it. + maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); + } else { + maxBoundary = rounded; } } - /** - * Checks the {@link IndexerState} and returns false if the execution - * should be stopped. 
- */ - private boolean checkState(IndexerState currentState) { - switch (currentState) { - case INDEXING: - // normal state; - return true; - - case STOPPING: - logger.info("Rollup job encountered [" + IndexerState.STOPPING + "] state, halting indexer."); - doSaveState(finishAndSetState(), getPosition(), () -> {}); - return false; - - case STOPPED: - return false; - - case ABORTING: - logger.info("Requested shutdown of indexer for job [" + job.getConfig().getId() + "]"); - onAbort(); - return false; - - default: - // Anything other than indexing, aborting or stopping is unanticipated - logger.warn("Encountered unexpected state [" + currentState + "] while indexing"); - throw new IllegalStateException("Rollup job encountered an illegal state [" + currentState + "]"); - } - } - - private void onBulkResponse(BulkResponse response, Map after) { - // TODO we should check items in the response and move after accordingly to resume the failing buckets ? - stats.incrementNumRollups(response.getItems().length); - if (response.hasFailures()) { - logger.warn("Error while attempting to bulk index rollup documents: " + response.buildFailureMessage()); - } - try { - if (checkState(getState()) == false) { - return ; - } - position.set(after); - ActionListener listener = ActionListener.wrap(this::onSearchResponse, this::finishWithFailure); - // TODO probably something more intelligent than every-50 is needed - if (stats.getNumPages() > 0 && stats.getNumPages() % 50 == 0) { - doSaveState(IndexerState.INDEXING, after, () -> doNextSearch(buildSearchRequest(), listener)); - } else { - doNextSearch(buildSearchRequest(), listener); - } - } catch (Exception e) { - finishWithFailure(e); - } - } - - private void onSearchResponse(SearchResponse searchResponse) { - try { - if (checkState(getState()) == false) { - return ; - } - if (searchResponse.getShardFailures().length != 0) { - throw new RuntimeException("Shard failures encountered while running indexer for rollup job [" - + job.getConfig().getId() + "]: " + Arrays.toString(searchResponse.getShardFailures())); - } - final CompositeAggregation response = searchResponse.getAggregations().get(AGGREGATION_NAME); - if (response == null) { - throw new IllegalStateException("Missing composite response for query: " + compositeBuilder.toString()); - } - stats.incrementNumPages(1); - if (response.getBuckets().isEmpty()) { - // this is the end... - logger.debug("Finished indexing for job [" + job.getConfig().getId() + "], saving state and shutting down."); - - // Change state first, then try to persist. 
This prevents in-progress STOPPING/ABORTING from - // being persisted as STARTED but then stop the job - doSaveState(finishAndSetState(), position.get(), this::onFinish); - return; - } - - final BulkRequest bulkRequest = new BulkRequest(); + @Override + protected SearchRequest buildSearchRequest() { // Indexer is single-threaded, and only place that the ID scheme can get upgraded is doSaveState(), so // we can pass down the boolean value rather than the atomic here - final List docs = IndexerUtils.processBuckets(response, job.getConfig().getRollupIndex(), - stats, job.getConfig().getGroupConfig(), job.getConfig().getId(), upgradedDocumentID.get()); - docs.forEach(bulkRequest::add); - assert bulkRequest.requests().size() > 0; - doNextBulk(bulkRequest, - ActionListener.wrap( - bulkResponse -> onBulkResponse(bulkResponse, response.afterKey()), - exc -> finishWithFailure(exc) - ) - ); - } catch(Exception e) { - finishWithFailure(e); - } - } - - private void finishWithFailure(Exception exc) { - doSaveState(finishAndSetState(), position.get(), () -> onFailure(exc)); - } - - private IndexerState finishAndSetState() { - return state.updateAndGet( - prev -> { - switch (prev) { - case INDEXING: - // ready for another job - return IndexerState.STARTED; - - case STOPPING: - // must be started again - return IndexerState.STOPPED; - - case ABORTING: - // abort and exit - onAbort(); - return IndexerState.ABORTING; // This shouldn't matter, since onAbort() will kill the task first - - case STOPPED: - // No-op. Shouldn't really be possible to get here (should have to go through STOPPING - // first which will be handled) but is harmless to no-op and we don't want to throw exception here - return IndexerState.STOPPED; - - default: - // any other state is unanticipated at this point - throw new IllegalStateException("Rollup job encountered an illegal state [" + prev + "]"); - } - }); - } - - private SearchRequest buildSearchRequest() { final Map position = getPosition(); SearchSourceBuilder searchSource = new SearchSourceBuilder() .size(0) @@ -405,6 +119,16 @@ public abstract class RollupIndexer { return new SearchRequest(job.getConfig().getIndexPattern()).source(searchSource); } + @Override + protected IterationResult> doProcess(SearchResponse searchResponse) { + final CompositeAggregation response = searchResponse.getAggregations().get(AGGREGATION_NAME); + + return new IterationResult<>( + IndexerUtils.processBuckets(response, job.getConfig().getRollupIndex(), getStats(), + job.getConfig().getGroupConfig(), job.getConfig().getId(), upgradedDocumentID.get()), + response.afterKey(), response.getBuckets().isEmpty()); + } + /** * Creates a skeleton {@link CompositeAggregationBuilder} from the provided job config. * @param config The config for the job. 
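For orientation, a minimal sketch of the refactored contract follows: the subclass now only supplies the paging search request and converts each response page into an IterationResult, while AsyncTwoPhaseIndexer owns the state machine, position, and stats that the removed code above managed by hand. The generics and method bodies below are simplified placeholders, not the real implementation.

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicReference;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer;
import org.elasticsearch.xpack.core.indexing.IndexerState;
import org.elasticsearch.xpack.core.indexing.IterationResult;
import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats;

// Illustrative skeleton only; it stays abstract because the concrete task still
// provides doNextSearch/doNextBulk/doSaveState and the lifecycle callbacks.
abstract class SketchRollupIndexer extends AsyncTwoPhaseIndexer<Map<String, Object>, RollupIndexerJobStats> {

    SketchRollupIndexer(Executor executor, AtomicReference<IndexerState> initialState,
                        Map<String, Object> initialPosition) {
        // the base class now owns state, position and stats, which RollupIndexer
        // previously tracked in its own AtomicReference fields
        super(executor, initialState, initialPosition, new RollupIndexerJobStats());
    }

    @Override
    protected SearchRequest buildSearchRequest() {
        // build the next composite-aggregation page, resuming from getPosition()
        return new SearchRequest();
    }

    @Override
    protected IterationResult<Map<String, Object>> doProcess(SearchResponse searchResponse) {
        // map one page of buckets to index requests; the last argument marks the run
        // as done when the composite aggregation returns no further buckets
        return new IterationResult<>(Collections.emptyList(), null, true);
    }
}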
@@ -481,7 +205,7 @@ public abstract class RollupIndexer { final TermsGroupConfig terms = groupConfig.getTerms(); builders.addAll(createValueSourceBuilders(terms)); } - return unmodifiableList(builders); + return Collections.unmodifiableList(builders); } public static List> createValueSourceBuilders(final DateHistogramGroupConfig dateHistogram) { @@ -491,7 +215,7 @@ public abstract class RollupIndexer { dateHistogramBuilder.dateHistogramInterval(dateHistogram.getInterval()); dateHistogramBuilder.field(dateHistogramField); dateHistogramBuilder.timeZone(toDateTimeZone(dateHistogram.getTimeZone())); - return singletonList(dateHistogramBuilder); + return Collections.singletonList(dateHistogramBuilder); } public static List> createValueSourceBuilders(final HistogramGroupConfig histogram) { @@ -506,7 +230,7 @@ public abstract class RollupIndexer { builders.add(histogramBuilder); } } - return unmodifiableList(builders); + return Collections.unmodifiableList(builders); } public static List> createValueSourceBuilders(final TermsGroupConfig terms) { @@ -520,7 +244,7 @@ public abstract class RollupIndexer { builders.add(termsBuilder); } } - return unmodifiableList(builders); + return Collections.unmodifiableList(builders); } /** @@ -564,7 +288,7 @@ public abstract class RollupIndexer { } } } - return unmodifiableList(builders); + return Collections.unmodifiableList(builders); } private static DateTimeZone toDateTimeZone(final String timezone) { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 65362f9ad9d..4a4b53575b2 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -25,13 +25,13 @@ import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.rollup.Rollup; @@ -218,7 +218,7 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE * Gets the stats for this task. 
* @return The stats of this task */ - public RollupJobStats getStats() { + public RollupIndexerJobStats getStats() { return indexer.getStats(); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java index 00aeb0d06ab..fcc1f2c4f57 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestGetRollupJobsAction.java @@ -12,21 +12,19 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.rollup.Rollup; import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction; - -import java.io.IOException; +import org.elasticsearch.xpack.rollup.Rollup; public class RestGetRollupJobsAction extends BaseRestHandler { public static final ParseField ID = new ParseField("id"); public RestGetRollupJobsAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(RestRequest.Method.GET, Rollup.BASE_PATH + "job/{id}/", this); + controller.registerHandler(RestRequest.Method.GET, Rollup.BASE_PATH + "job/{id}/", this); } @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { String id = restRequest.param(ID.getPreferredName()); GetRollupJobsAction.Request request = new GetRollupJobsAction.Request(id); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java index 3235d0c39e2..c23151c4c6a 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java @@ -61,6 +61,32 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { assertThat(bestCaps.size(), equalTo(1)); } + public void testBiggerButCompatibleFixedInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100s"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .dateHistogramInterval(new DateHistogramInterval("1000s")); + + Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); + assertThat(bestCaps.size(), equalTo(1)); + } + + public void testBiggerButCompatibleFixedMillisInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100ms"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .interval(1000); + + Set bestCaps = 
RollupJobIdentifierUtils.findBestJobs(builder, caps); + assertThat(bestCaps.size(), equalTo(1)); + } + public void testIncompatibleInterval() { final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); @@ -75,6 +101,20 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { "[foo] which also satisfies all requirements of query.")); } + public void testIncompatibleFixedCalendarInterval() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("5d"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .dateHistogramInterval(new DateHistogramInterval("day")); + + RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps)); + assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " + + "[foo] which also satisfies all requirements of query.")); + } + public void testBadTimeZone() { final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST")); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); @@ -385,6 +425,27 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { "[bar] which also satisfies all requirements of query.")); } + public void testHistoIntervalNotMultiple() { + HistogramAggregationBuilder histo = new HistogramAggregationBuilder("test_histo"); + histo.interval(10) // <--- interval is not a multiple of 3 + .field("bar") + .subAggregation(new MaxAggregationBuilder("the_max").field("max_field")) + .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); + + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", + new DateHistogramInterval("1d"), null, "UTC"), + new HistogramGroupConfig(3L, "bar"), + null); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + Exception e = expectThrows(RuntimeException.class, + () -> RollupJobIdentifierUtils.findBestJobs(histo, caps)); + assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [histogram] agg on field " + + "[bar] which also satisfies all requirements of query.")); + } + public void testMissingMetric() { int i = ESTestCase.randomIntBetween(0, 3); @@ -417,6 +478,105 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } + public void testValidateFixedInterval() { + boolean valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(200, new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(1000, new DateHistogramInterval("200ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(5*60*1000, new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = 
RollupJobIdentifierUtils.validateFixedInterval(10*5*60*1000, new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("500ms")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("5m")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("second")); + assertFalse(valid); + + // ----------- + // Same tests, with both being DateHistoIntervals + // ----------- + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("200ms"), + new DateHistogramInterval("100ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("1000ms"), + new DateHistogramInterval("200ms")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("5m"), + new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("20m"), + new DateHistogramInterval("5m")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("500ms")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("5m")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("second")); + assertFalse(valid); + } + + public void testValidateCalendarInterval() { + boolean valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("second")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("minute"), + new DateHistogramInterval("second")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("month"), + new DateHistogramInterval("day")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("1d"), + new DateHistogramInterval("1s")); + assertTrue(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("minute")); + assertFalse(valid); + + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("second"), + new DateHistogramInterval("1m")); + assertFalse(valid); + + // Fails because both are actually fixed + valid = RollupJobIdentifierUtils.validateCalendarInterval(new DateHistogramInterval("100ms"), + new DateHistogramInterval("100ms")); + assertFalse(valid); + } + private Set singletonSet(RollupJobCaps cap) { Set caps = new HashSet<>(); caps.add(cap); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java 
b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java index d74e7413d15..098bc83bc70 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java @@ -41,7 +41,7 @@ import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.MetricConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStats; +import org.elasticsearch.xpack.core.rollup.job.RollupIndexerJobStats; import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTime; import org.mockito.stubbing.Answer; @@ -66,7 +66,7 @@ import static org.mockito.Mockito.when; public class IndexerUtilsTests extends AggregatorTestCase { public void testMissingFields() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats = new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = "the_histo"; String valueField = "the_avg"; @@ -130,7 +130,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { public void testCorrectFields() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = "the_histo"; String valueField = "the_avg"; @@ -198,7 +198,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { public void testNumericTerms() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats= new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = "the_histo"; String valueField = "the_avg"; @@ -255,7 +255,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { public void testEmptyCounts() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String timestampField = "ts"; String valueField = "the_avg"; @@ -362,7 +362,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { // The content of the config don't actually matter for this test // because the test is just looking at agg keys GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(123L, "abc"), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", false); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", false); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("1237859798")); } @@ -406,7 +406,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { }); GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1L, "abc"), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", true); assertThat(docs.size(), equalTo(1)); 
assertThat(docs.get(0).id(), equalTo("foo$c9LcrFqeFW92uN_Z7sv1hA")); } @@ -456,7 +456,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { }); GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), new HistogramGroupConfig(1, "abc"), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", true); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), groupConfig, "foo", true); assertThat(docs.size(), equalTo(1)); assertThat(docs.get(0).id(), equalTo("foo$VAFKZpyaEqYRPLyic57_qw")); } @@ -483,14 +483,15 @@ public class IndexerUtilsTests extends AggregatorTestCase { }); GroupConfig groupConfig = new GroupConfig(randomDateHistogramGroupConfig(random()), randomHistogramGroupConfig(random()), null); - List docs = IndexerUtils.processBuckets(composite, "foo", new RollupJobStats(), groupConfig, "foo", randomBoolean()); + List docs = IndexerUtils.processBuckets(composite, "foo", new RollupIndexerJobStats(), + groupConfig, "foo", randomBoolean()); assertThat(docs.size(), equalTo(1)); assertFalse(Strings.isNullOrEmpty(docs.get(0).id())); } public void testMissingBuckets() throws IOException { String indexName = randomAlphaOfLengthBetween(1, 10); - RollupJobStats stats= new RollupJobStats(0, 0, 0, 0); + RollupIndexerJobStats stats = new RollupIndexerJobStats(0, 0, 0, 0); String metricField = "metric_field"; String valueField = "value_field"; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 6d29ee9f9ba..55f1cfbdbb2 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -50,10 +50,10 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInter import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.MetricConfig; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.junit.Before; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index 955dcbc2beb..c74ecbadf4f 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -23,8 +23,8 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJob; +import org.elasticsearch.xpack.core.indexing.IndexerState; import 
org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; import org.mockito.stubbing.Answer; @@ -639,7 +639,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -743,7 +743,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -763,7 +763,7 @@ public class RollupIndexerStateTests extends ESTestCase { Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); Consumer failureConsumer = e -> { - assertThat(e.getMessage(), startsWith("Shard failures encountered while running indexer for rollup job")); + assertThat(e.getMessage(), startsWith("Shard failures encountered while running indexer for job")); isFinished.set(true); }; @@ -786,7 +786,7 @@ public class RollupIndexerStateTests extends ESTestCase { // Note: no pages processed, no docs were indexed assertThat(indexer.getStats().getNumPages(), equalTo(0L)); - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -896,7 +896,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertThat(indexer.getStats().getNumPages(), equalTo(1L)); // Note: no docs were indexed - assertThat(indexer.getStats().getNumRollups(), equalTo(0L)); + assertThat(indexer.getStats().getOutputDocuments(), equalTo(0L)); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 9a75d6fc675..a47d057b5d5 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -20,11 +20,11 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; import org.elasticsearch.xpack.core.rollup.job.RollupJob; import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; diff --git a/x-pack/plugin/security/cli/build.gradle b/x-pack/plugin/security/cli/build.gradle index 426c48aac80..377d10ec7f2 100644 --- a/x-pack/plugin/security/cli/build.gradle +++ b/x-pack/plugin/security/cli/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.precommit.ForbiddenApisCliTask + apply plugin: 'elasticsearch.build' 
archivesBaseName = 'elasticsearch-security-cli' @@ -6,8 +8,8 @@ dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here compileOnly project(path: xpackModule('core'), configuration: 'default') - compile 'org.bouncycastle:bcprov-jdk15on:1.59' compile 'org.bouncycastle:bcpkix-jdk15on:1.59' + compile 'org.bouncycastle:bcprov-jdk15on:1.59' testImplementation 'com.google.jimfs:jimfs:1.1' testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" @@ -20,6 +22,14 @@ dependencyLicenses { mapping from: /bc.*/, to: 'bouncycastle' } -if (inFipsJvm) { +if (project.inFipsJvm) { test.enabled = false + // Forbidden APIs non-portable checks fail because Bouncy Castle classes are used from the FIPS JDK, since those are + // not part of the Java specification - all of this is as designed, so we have to relax this check for FIPS. + tasks.withType(ForbiddenApisCliTask) { + bundledSignatures -= "jdk-non-portable" + } + // FIPS JVM includes many classes from Bouncy Castle which count as jar hell for the third party audit; + // rather than providing a long list of exclusions, disable the check on FIPS. + thirdPartyAudit.enabled = false } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java index 2bd1bdf906a..f97afc1d52c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosTestCase.java @@ -74,6 +74,13 @@ public abstract class KerberosTestCase extends ESTestCase { unsupportedLocaleLanguages.add("ne"); unsupportedLocaleLanguages.add("dz"); unsupportedLocaleLanguages.add("mzn"); + unsupportedLocaleLanguages.add("mr"); + unsupportedLocaleLanguages.add("as"); + unsupportedLocaleLanguages.add("bn"); + unsupportedLocaleLanguages.add("lrc"); + unsupportedLocaleLanguages.add("my"); + unsupportedLocaleLanguages.add("ps"); + unsupportedLocaleLanguages.add("ur"); } @BeforeClass diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java index a2c70db3b63..40009691875 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -259,7 +259,7 @@ public class NativeRolesStoreTests extends ESTestCase { .put(IndexMetaData.builder(securityIndexName).settings(settings)) .put(new IndexTemplateMetaData(SecurityIndexManager.SECURITY_TEMPLATE_NAME, 0, 0, Collections.singletonList(securityIndexName), Settings.EMPTY, ImmutableOpenMap.of(), - ImmutableOpenMap.of(), ImmutableOpenMap.of())) + ImmutableOpenMap.of())) .build(); if (withAlias) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java index 5fb8a754f0f..b8faedec718 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/agg/Aggs.java @@ -112,6 +112,9 @@ public class Aggs { } public Aggs addAgg(LeafAgg agg) { + if (metricAggs.contains(agg)) { + return this; + } return new Aggs(groups, combine(metricAggs, agg), pipelineAggs); } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml index f3fa8114ddb..759ddbad2b4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -210,3 +210,4 @@ setup: job_state: "stopped" upgraded_doc_id: true + diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index 620d575fc80..279d768fde8 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -11,7 +11,6 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; @@ -22,13 +21,16 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; import java.util.Collections; +import java.util.Comparator; import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -45,7 +47,7 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste Setting.boolSetting("xpack.watcher.require_manual_start", false, Property.NodeScope); private final AtomicReference state = new AtomicReference<>(WatcherState.STARTED); - private final AtomicReference> previousAllocationIds = new AtomicReference<>(Collections.emptyList()); + private final AtomicReference> previousShardRoutings = new AtomicReference<>(Collections.emptyList()); private final boolean requireManualStart; private volatile boolean shutDown = false; // indicates that the node has been shutdown and we should never start watcher after this. 
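Before the Watcher hunks below, a condensed restatement of the new change-detection step may help (names are taken from this file; this is illustration, not additional patch content). Tracking whole ShardRouting instances instead of bare allocation IDs means that a primary lost on a remote node, or a replica added remotely, also changes the computed list and therefore triggers a reload.

// condensed from the clusterChanged() hunk below
Set<ShardId> localShardIds = localShards.stream()
        .map(ShardRouting::shardId)
        .collect(Collectors.toSet());
// consider every started or relocating shard of the watch index...
List<ShardRouting> allShards = event.state().routingTable().index(watchIndex).shardsWithState(STARTED);
allShards.addAll(event.state().routingTable().index(watchIndex).shardsWithState(RELOCATING));
List<ShardRouting> localAffectedShardRoutings = allShards.stream()
        // ...but keep only those whose shardId is also allocated on this node
        .filter(shardRouting -> localShardIds.contains(shardRouting.shardId()))
        // ShardRouting is not Comparable; hashCode ordering merely makes the list deterministic
        .sorted(Comparator.comparing(ShardRouting::hashCode))
        .collect(Collectors.toList());
if (previousShardRoutings.get().equals(localAffectedShardRoutings) == false) {
    // validate the state, remember the new list, then reload or start the service
}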
private volatile WatcherService watcherService; @@ -110,6 +112,7 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste // if this is not a data node, we need to start it ourselves possibly if (event.state().nodes().getLocalNode().isDataNode() == false && isWatcherStoppedManually == false && this.state.get() == WatcherState.STOPPED) { + this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> this.state.set(WatcherState.STARTED)); return; } @@ -144,15 +147,20 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste return; } - List currentAllocationIds = localShards.stream() - .map(ShardRouting::allocationId) - .map(AllocationId::getId) - .sorted() + // also check if non-local shards have changed, as losing a shard on a + // remote node or adding a replica on a remote node needs to trigger a reload too + Set localShardIds = localShards.stream().map(ShardRouting::shardId).collect(Collectors.toSet()); + List allShards = event.state().routingTable().index(watchIndex).shardsWithState(STARTED); + allShards.addAll(event.state().routingTable().index(watchIndex).shardsWithState(RELOCATING)); + List localAffectedShardRoutings = allShards.stream() + .filter(shardRouting -> localShardIds.contains(shardRouting.shardId())) + // ShardRouting is not Comparable, so we need some ordering mechanism + .sorted(Comparator.comparing(ShardRouting::hashCode)) .collect(Collectors.toList()); - if (previousAllocationIds.get().equals(currentAllocationIds) == false) { + if (previousShardRoutings.get().equals(localAffectedShardRoutings) == false) { if (watcherService.validate(event.state())) { - previousAllocationIds.set(Collections.unmodifiableList(currentAllocationIds)); + previousShardRoutings.set(localAffectedShardRoutings); if (state.get() == WatcherState.STARTED) { watcherService.reload(event.state(), "new local watcher shard allocation ids"); } else if (state.get() == WatcherState.STOPPED) { @@ -187,13 +195,13 @@ public class WatcherLifeCycleService extends AbstractComponent implements Cluste * @return true, if existing allocation ids were cleaned out, false otherwise */ private boolean clearAllocationIds() { - List previousIds = previousAllocationIds.getAndSet(Collections.emptyList()); - return previousIds.equals(Collections.emptyList()) == false; + List previousIds = previousShardRoutings.getAndSet(Collections.emptyList()); + return previousIds.isEmpty() == false; } // for testing purposes only - List allocationIds() { - return previousAllocationIds.get(); + List shardRoutings() { + return previousShardRoutings.get(); } public WatcherState getState() { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index 49915674fe9..599287bb50a 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -183,9 +183,6 @@ public class WatcherService extends AbstractComponent { // by checking the cluster state version before and after loading the watches we can potentially just exit without applying the // changes processedClusterStateVersion.set(state.getVersion()); - triggerService.pauseExecution(); - int cancelledTaskCount = executionService.clearExecutionsAndQueue(); - logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); 
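// Note on the three removed lines above: the pause/clear now runs only after the
// version check in reloadInner() (next hunk), so a reload that has already been
// superseded by a newer cluster state no longer pauses a healthy trigger service.
// Condensed sketch of the resulting flow, using the names from this file:
//
//   void reload(ClusterState state, String reason) {
//       processedClusterStateVersion.set(state.getVersion());
//       executor.execute(() -> reloadInner(state, reason, false));
//   }
//
//   boolean reloadInner(ClusterState state, String reason, boolean loadTriggeredWatches) {
//       if (processedClusterStateVersion.get() != state.getVersion()) {
//           return false; // a newer state owns the reload; touch nothing
//       }
//       // only the reload holding the latest version pauses execution, clears the
//       // queue, un-pauses and restarts the trigger service with the fresh watches
//   }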
executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), e -> logger.error("error reloading watcher", e))); @@ -221,6 +218,7 @@ public class WatcherService extends AbstractComponent { if (processedClusterStateVersion.get() != state.getVersion()) { logger.debug("watch service has not been reloaded for state [{}], another reload for state [{}] in progress", state.getVersion(), processedClusterStateVersion.get()); + return false; } Collection watches = loadWatches(state); @@ -231,7 +229,13 @@ public class WatcherService extends AbstractComponent { // if we had another state coming in the meantime, we will not start the trigger engines with these watches, but wait // until the others are loaded + // this is also the place where we pause the trigger service execution and clear the current execution service, so that we make sure + // that existing executions finish, but no new ones are executed if (processedClusterStateVersion.get() == state.getVersion()) { + triggerService.pauseExecution(); + int cancelledTaskCount = executionService.clearExecutionsAndQueue(); + logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); + executionService.unPause(); triggerService.start(watches); if (triggeredWatches.isEmpty() == false) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java index 05aa7cf3028..4c10f794880 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java @@ -56,7 +56,7 @@ public class TickerScheduleTriggerEngine extends ScheduleTriggerEngine { schedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), startTime)); } } - this.schedules.putAll(schedules); + this.schedules = schedules; } @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 700901753d4..384338af5a2 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -254,9 +254,12 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { .add(newNode("node_2")) .build(); + ShardRouting firstShardOnSecondNode = TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED); + ShardRouting secondShardOnFirstNode = TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED); + IndexRoutingTable previousWatchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) - .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .addShard(secondShardOnFirstNode) + .addShard(firstShardOnSecondNode) .build(); IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) @@ -273,10 +276,19 @@ .metaData(MetaData.builder().put(indexMetaData, false)) .build(); + // add a replica on the local node + boolean 
addShardOnLocalNode = randomBoolean(); + final ShardRouting addedShardRouting; + if (addShardOnLocalNode) { + addedShardRouting = TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED); + } else { + addedShardRouting = TestShardRouting.newShardRouting(secondShardId, "node_2", false, STARTED); + } + IndexRoutingTable currentWatchRoutingTable = IndexRoutingTable.builder(watchIndex) - .addShard(TestShardRouting.newShardRouting(shardId, "node_1", false, STARTED)) - .addShard(TestShardRouting.newShardRouting(secondShardId, "node_1", true, STARTED)) - .addShard(TestShardRouting.newShardRouting(shardId, "node_2", true, STARTED)) + .addShard(secondShardOnFirstNode) + .addShard(firstShardOnSecondNode) + .addShard(addedShardRouting) .build(); ClusterState stateWithReplicaAdded = ClusterState.builder(new ClusterName("my-cluster")) @@ -477,7 +489,67 @@ assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); } - private ClusterState startWatcher() { + // this emulates a node outage somewhere in the cluster that carried a watcher shard + // the number of shards remains the same, but we need to ensure that watcher properly reloads + // previously we only checked the local shard allocations, but we also need to check if shards in the cluster have changed + public void testWatcherReloadsOnNodeOutageWithWatcherShard() { + Index watchIndex = new Index(Watch.INDEX, "foo"); + ShardId shardId = new ShardId(watchIndex, 0); + String localNodeId = randomFrom("node_1", "node_2"); + String outageNodeId = localNodeId.equals("node_1") ? "node_2" : "node_1"; + DiscoveryNodes previousDiscoveryNodes = new DiscoveryNodes.Builder().masterNodeId(localNodeId).localNodeId(localNodeId) + .add(newNode(localNodeId)) + .add(newNode(outageNodeId)) + .build(); + + ShardRouting replicaShardRouting = TestShardRouting.newShardRouting(shardId, localNodeId, false, STARTED); + ShardRouting primaryShardRouting = TestShardRouting.newShardRouting(shardId, outageNodeId, true, STARTED); + IndexRoutingTable previousWatchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(replicaShardRouting) + .addShard(primaryShardRouting) + .build(); + + IndexMetaData indexMetaData = IndexMetaData.builder(Watch.INDEX) + .settings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 6) + ).build(); + + ClusterState previousState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(previousDiscoveryNodes) + .routingTable(RoutingTable.builder().add(previousWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + ShardRouting nowPrimaryShardRouting = replicaShardRouting.moveActiveReplicaToPrimary(); + IndexRoutingTable currentWatchRoutingTable = IndexRoutingTable.builder(watchIndex) + .addShard(nowPrimaryShardRouting) + .build(); + + DiscoveryNodes currentDiscoveryNodes = new DiscoveryNodes.Builder().masterNodeId(localNodeId).localNodeId(localNodeId) + .add(newNode(localNodeId)) + .build(); + + ClusterState currentState = ClusterState.builder(new ClusterName("my-cluster")) + .nodes(currentDiscoveryNodes) + .routingTable(RoutingTable.builder().add(currentWatchRoutingTable).build()) + .metaData(MetaData.builder().put(indexMetaData, false)) + .build(); + + // initialize the previous state, so all the allocation ids are loaded + 
when(watcherService.validate(anyObject())).thenReturn(true); + lifeCycleService.clusterChanged(new ClusterChangedEvent("whatever", previousState, currentState)); + + reset(watcherService); + when(watcherService.validate(anyObject())).thenReturn(true); + ClusterChangedEvent event = new ClusterChangedEvent("whatever", currentState, previousState); + lifeCycleService.clusterChanged(event); + verify(watcherService).reload(eq(event.state()), anyString()); + } + + private void startWatcher() { Index index = new Index(Watch.INDEX, "uuid"); IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); indexRoutingTableBuilder.addShard( @@ -506,12 +578,10 @@ lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", state, emptyState)); assertThat(lifeCycleService.getState(), is(WatcherState.STARTED)); verify(watcherService, times(1)).reload(eq(state), anyString()); - assertThat(lifeCycleService.allocationIds(), hasSize(1)); + assertThat(lifeCycleService.shardRoutings(), hasSize(1)); // reset the mock, the user has to mock everything themselves again reset(watcherService); - - return state; } private List randomIndexPatterns() { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java index 7949998867b..6680b38ab94 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java @@ -35,7 +35,9 @@ import java.util.function.Consumer; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.daily; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval; import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.weekly; +import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; import static org.joda.time.DateTimeZone.UTC; import static org.mockito.Mockito.mock; @@ -50,8 +52,12 @@ public class TickerScheduleEngineTests extends ESTestCase { } private TriggerEngine createEngine() { - return new TickerScheduleTriggerEngine(Settings.EMPTY, - mock(ScheduleRegistry.class), clock); + Settings settings = Settings.EMPTY; + // having a low value here speeds up the tests tremendously; we still want to run with the defaults every now and then + if (usually()) { + settings = Settings.builder().put(TickerScheduleTriggerEngine.TICKER_INTERVAL_SETTING.getKey(), "10ms").build(); + } + return new TickerScheduleTriggerEngine(settings, mock(ScheduleRegistry.class), clock); } private void advanceClockIfNeeded(DateTime newCurrentDateTime) { @@ -104,6 +110,40 @@ assertThat(bits.cardinality(), is(count)); } + public void testStartClearsExistingSchedules() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + List firedWatchIds = new ArrayList<>(); + engine.register(new Consumer>() { + @Override + public void accept(Iterable events) { + for (TriggerEvent event : events) { + firedWatchIds.add(event.jobName()); + } + latch.countDown(); + } + }); + + int count = randomIntBetween(2, 5); + List watches = new ArrayList<>(); + for (int 
i = 0; i < count; i++) { + watches.add(createWatch(String.valueOf(i), interval("1s"))); + } + engine.start(watches); + + watches.clear(); + for (int i = 0; i < count; i++) { + watches.add(createWatch("another_id" + i, interval("1s"))); + } + engine.start(watches); + + advanceClockIfNeeded(new DateTime(clock.millis(), UTC).plusMillis(1100)); + if (!latch.await(3 * count, TimeUnit.SECONDS)) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(firedWatchIds, everyItem(startsWith("another_id"))); + } + public void testAddHourly() throws Exception { final String name = "job_name"; final CountDownLatch latch = new CountDownLatch(1); diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java index 43ad4dc0a45..fb9c665b2bf 100644 --- a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java +++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java @@ -173,7 +173,7 @@ public class RollupIT extends ESRestTestCase { " \"date_histo\": {\n" + " \"date_histogram\": {\n" + " \"field\": \"timestamp\",\n" + - " \"interval\": \"1h\",\n" + + " \"interval\": \"60m\",\n" + " \"format\": \"date_time\"\n" + " },\n" + " \"aggs\": {\n" + diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java index f56f96efc78..f7ecb6d58e5 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java @@ -106,6 +106,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32299") public void testMonitorClusterHealth() throws Exception { String watchId = "cluster_health_watch"; diff --git a/x-pack/qa/sql/src/main/resources/agg.sql-spec b/x-pack/qa/sql/src/main/resources/agg.sql-spec index f42ce0ef7a0..f1ab9160b1a 100644 --- a/x-pack/qa/sql/src/main/resources/agg.sql-spec +++ b/x-pack/qa/sql/src/main/resources/agg.sql-spec @@ -394,4 +394,12 @@ SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM aggMultiWithHavingOnCount SELECT MIN(salary) min, MAX(salary) max, gender g, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g HAVING c > 40 ORDER BY gender; aggMultiGroupByMultiWithHavingOnCount -SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g, languages HAVING c > 40 ORDER BY gender, languages; \ No newline at end of file +SELECT MIN(salary) min, MAX(salary) max, gender g, languages l, COUNT(*) c FROM "test_emp" WHERE languages > 0 GROUP BY g, languages HAVING c > 40 ORDER BY gender, languages; + +// repetition of the same aggs to check whether the generated query contains duplicates or not +aggRepeatFunctionAcrossFields +SELECT MIN(emp_no) AS a, 1 + MIN(emp_no) AS b, ABS(MIN(emp_no)) AS c FROM test_emp; +aggRepeatFunctionBetweenSelectAndHaving +SELECT gender, COUNT(DISTINCT languages) AS c FROM test_emp GROUP BY gender HAVING count(DISTINCT languages) > 0 ORDER BY gender; +
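The two new specs above exercise the duplicate-suppression guard added to Aggs.addAgg earlier in this patch: repeating MIN(emp_no) across several select expressions, or COUNT(DISTINCT languages) between SELECT and HAVING, must not emit the same leaf agg twice in the generated query. Restated from that hunk with comments, for clarity only:

public Aggs addAgg(LeafAgg agg) {
    if (metricAggs.contains(agg)) {
        // an identical leaf agg is already tracked: reuse the existing one
        // instead of emitting a duplicate into the generated aggregation tree
        return this;
    }
    return new Aggs(groups, combine(metricAggs, agg), pipelineAggs);
}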