diff --git a/.ci/init.gradle b/.ci/init.gradle
index 62e6c48b812..ec16b49bfab 100644
--- a/.ci/init.gradle
+++ b/.ci/init.gradle
@@ -6,6 +6,7 @@ if (System.env.ELASTIC_ARTIFACTORY_USERNAME == null || System.env.ELASTIC_ARTIFA
   settings.pluginManagement {
     repositories {
       maven {
+        name "artifactory-gradle-plugins"
         url "https://artifactory.elstc.co/artifactory/gradle-plugins"
         credentials {
           username System.env.ELASTIC_ARTIFACTORY_USERNAME
@@ -21,6 +22,7 @@ if (System.env.ELASTIC_ARTIFACTORY_USERNAME == null || System.env.ELASTIC_ARTIFA
   buildscript {
     repositories {
       maven {
+        name "artifactory-gradle-release"
         url "https://artifactory.elstc.co/artifactory/gradle-release/"
         credentials {
           username System.env.ELASTIC_ARTIFACTORY_USERNAME
@@ -31,6 +33,7 @@ if (System.env.ELASTIC_ARTIFACTORY_USERNAME == null || System.env.ELASTIC_ARTIFA
   }
   repositories {
     maven {
+      name "artifactory-gradle-release"
       url "https://artifactory.elstc.co/artifactory/gradle-release/"
       credentials {
         username System.env.ELASTIC_ARTIFACTORY_USERNAME
diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml
index 3545103f191..0f945376ee4 100644
--- a/.ci/matrix-runtime-javas.yml
+++ b/.ci/matrix-runtime-javas.yml
@@ -14,3 +14,5 @@ ES_RUNTIME_JAVA:
   - zulu8
   - zulu11
   - zulu12
+  - corretto11
+  - corretto8
diff --git a/build.gradle b/build.gradle
index 3cd35d2a5d4..c28a1896912 100644
--- a/build.gradle
+++ b/build.gradle
@@ -88,7 +88,7 @@ subprojects {
   }
   repositories {
     maven {
-      name = 'localTest'
+      name = 'test'
       url = "${rootProject.buildDir}/local-test-repo"
     }
   }
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 3ff07564b5b..65ee08bb50c 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -126,6 +126,7 @@ dependencies {
   compile 'com.avast.gradle:gradle-docker-compose-plugin:0.8.12'
   testCompile "junit:junit:${props.getProperty('junit')}"
   testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
+  testCompile 'com.github.tomakehurst:wiremock-jre8-standalone:2.23.2'
 }

 /*****************************************************************************
@@ -200,7 +201,7 @@ if (project != rootProject) {
   task integTest(type: Test) {
     // integration test requires the local testing repo for example plugin builds
     dependsOn project.rootProject.allprojects.collect {
-      it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'}
+      it.tasks.matching { it.name == 'publishNebulaPublicationToTestRepository'}
     }
     dependsOn setupLocalDownloads
     exclude "**/*Tests.class"
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index f1c6721aa0d..0357c2f76ef 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -39,6 +39,9 @@ import org.gradle.api.artifacts.ModuleVersionIdentifier
 import org.gradle.api.artifacts.ProjectDependency
 import org.gradle.api.artifacts.ResolvedArtifact
 import org.gradle.api.artifacts.dsl.RepositoryHandler
+import org.gradle.api.artifacts.repositories.ArtifactRepository
+import org.gradle.api.artifacts.repositories.IvyArtifactRepository
+import org.gradle.api.artifacts.repositories.MavenArtifactRepository
 import org.gradle.api.credentials.HttpHeaderCredentials
 import org.gradle.api.execution.TaskActionListener
 import org.gradle.api.execution.TaskExecutionGraph
@@ -580,6 +583,16 @@ class BuildPlugin implements Plugin<Project> {

     /** Adds repositories used by ES dependencies */
     static void configureRepositories(Project project) {
+        project.getRepositories().all { repository ->
+            if (repository instanceof MavenArtifactRepository) {
+                final MavenArtifactRepository maven = (MavenArtifactRepository) repository
+                assertRepositoryURIUsesHttps(maven, project, maven.getUrl())
+                repository.getArtifactUrls().each { uri -> assertRepositoryURIUsesHttps(maven, project, uri) }
+            } else if (repository instanceof IvyArtifactRepository) {
+                final IvyArtifactRepository ivy = (IvyArtifactRepository) repository
+                assertRepositoryURIUsesHttps(ivy, project, ivy.getUrl())
+            }
+        }
         RepositoryHandler repos = project.repositories
         if (System.getProperty("repos.mavenLocal") != null) {
             // with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is
@@ -589,6 +602,7 @@ class BuildPlugin implements Plugin<Project> {
         }
         repos.jcenter()
         repos.ivy {
+            name "elasticsearch"
             url "https://artifacts.elastic.co/downloads"
             patternLayout {
                 artifact "elasticsearch/[module]-[revision](-[classifier]).[ext]"
@@ -617,6 +631,12 @@
         }
     }

+    private static void assertRepositoryURIUsesHttps(final ArtifactRepository repository, final Project project, final URI uri) {
+        if (uri != null && uri.toURL().getProtocol().equals("http")) {
+            throw new GradleException("repository [${repository.name}] on project with path [${project.path}] is using http for artifacts on [${uri.toURL()}]")
+        }
+    }
+
     /**
      * Returns a closure which can be used with a MavenPom for fixing problems with gradle generated poms.
      *
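
The new check turns any plain-http repository URL into a hard failure at configuration time. As a minimal illustration (the repository name and URL here are hypothetical, not part of this change), a build script like the following would now fail with the GradleException above:

    repositories {
        maven {
            name "insecure"
            // rejected by assertRepositoryURIUsesHttps: protocol is http, not https
            url "http://example.com/maven"
        }
    }
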
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
index 58ef5b6d323..d5bdd211702 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
@@ -99,9 +99,8 @@ class PluginBuildPlugin extends BuildPlugin {
                 project.tasks.run.dependsOn(project.tasks.bundlePlugin)
                 if (isModule) {
-                    project.tasks.run.clusterConfig.module(project)
                     project.tasks.run.clusterConfig.distribution = System.getProperty(
-                            'run.distribution', 'integ-test-zip'
+                            'run.distribution', isXPackModule ? 'default' : 'oss'
                     )
                 } else {
                     project.tasks.run.clusterConfig.plugin(project.path)
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
index 824cb161a63..7224bec4bd8 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
@@ -964,6 +964,8 @@ class ClusterFormationTasks {
         }
         doLast {
             project.delete(node.pidFile)
+            // Large tests can exhaust disk space; clean up the jdk from the distribution to save some space
+            project.delete(new File(node.homeDir, "jdk"))
         }
     }
 }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
index eec46f9a522..ef784b6f901 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
@@ -70,7 +70,7 @@ class RestIntegTestTask extends DefaultTask {
             project.testClusters {
                 "$name" {
                     distribution = 'INTEG_TEST'
-                    version = project.version
+                    version = VersionProperties.elasticsearch
                     javaHome = project.file(project.ext.runtimeJavaHome)
                 }
             }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
index 093733fe844..0262c7d8151 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy
@@ -174,6 +174,7 @@ class VagrantTestPlugin implements Plugin<Project> {
            which should work for 5.0.0+. This isn't a real ivy repository but gradle is fine with that */
         repos.ivy {
+            name "elasticsearch"
             artifactPattern "https://artifacts.elastic.co/downloads/elasticsearch/[module]-[revision].[ext]"
         }
     }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java
index d8897053f05..9cb3cc52dd0 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java
@@ -20,16 +20,14 @@
 package org.elasticsearch.gradle;

 public enum Distribution {

-    INTEG_TEST("elasticsearch", "integ-test-zip"),
-    DEFAULT("elasticsearch", "elasticsearch"),
-    OSS("elasticsearch-oss", "elasticsearch-oss");
+    INTEG_TEST("elasticsearch"),
+    DEFAULT("elasticsearch"),
+    OSS("elasticsearch-oss");

     private final String artifactName;
-    private final String group;

-    Distribution(String name, String group) {
+    Distribution(String name) {
         this.artifactName = name;
-        this.group = group;
     }

     public String getArtifactName() {
@@ -37,7 +35,11 @@ public enum Distribution {
     }

     public String getGroup() {
-        return "org.elasticsearch.distribution." + group;
+        if (this.equals(INTEG_TEST)) {
+            return "org.elasticsearch.distribution.integ-test-zip";
+        } else {
+            return "org.elasticsearch.distribution." + name().toLowerCase();
+        }
     }

     public String getFileExtension() {
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java
new file mode 100644
index 00000000000..aa26f398e8b
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle;
+
+import org.gradle.api.Buildable;
+import org.gradle.api.Project;
+import org.gradle.api.artifacts.Configuration;
+import org.gradle.api.provider.Property;
+import org.gradle.api.tasks.TaskDependency;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.regex.Pattern;
+
+public class Jdk implements Buildable, Iterable<File> {
+
+    static final Pattern VERSION_PATTERN = Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+)(@([a-f0-9]{32}))?");
+    private static final List<String> ALLOWED_PLATFORMS = Collections.unmodifiableList(Arrays.asList("linux", "windows", "darwin"));
+
+    private final String name;
+    private final Configuration configuration;
+
+    private final Property<String> version;
+    private final Property<String> platform;
+
+    Jdk(String name, Project project) {
+        this.name = name;
+        this.configuration = project.getConfigurations().create("jdk_" + name);
+        this.version = project.getObjects().property(String.class);
+        this.platform = project.getObjects().property(String.class);
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public String getVersion() {
+        return version.get();
+    }
+
+    public void setVersion(String version) {
+        if (VERSION_PATTERN.matcher(version).matches() == false) {
+            throw new IllegalArgumentException("malformed version [" + version + "] for jdk [" + name + "]");
+        }
+        this.version.set(version);
+    }
+
+    public String getPlatform() {
+        return platform.get();
+    }
+
+    public void setPlatform(String platform) {
+        if (ALLOWED_PLATFORMS.contains(platform) == false) {
+            throw new IllegalArgumentException(
+                "unknown platform [" + platform + "] for jdk [" + name + "], must be one of " + ALLOWED_PLATFORMS);
+        }
+        this.platform.set(platform);
+    }
+
+    // pkg private, for internal use
+    Configuration getConfiguration() {
+        return configuration;
+    }
+
+    @Override
+    public String toString() {
+        return configuration.getSingleFile().toString();
+    }
+
+    @Override
+    public TaskDependency getBuildDependencies() {
+        return configuration.getBuildDependencies();
+    }
+
+    // internal, make this jdk's configuration unmodifiable
+    void finalizeValues() {
+        if (version.isPresent() == false) {
+            throw new IllegalArgumentException("version not specified for jdk [" + name + "]");
+        }
+        if (platform.isPresent() == false) {
+            throw new IllegalArgumentException("platform not specified for jdk [" + name + "]");
+        }
+        version.finalizeValue();
+        platform.finalizeValue();
+    }
+
+    @Override
+    public Iterator<File> iterator() {
+        return configuration.iterator();
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java
new file mode 100644
index 00000000000..a6372dfd231
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle;
+
+import org.gradle.api.Action;
+import org.gradle.api.NamedDomainObjectContainer;
+import org.gradle.api.Plugin;
+import org.gradle.api.Project;
+import org.gradle.api.UnknownTaskException;
+import org.gradle.api.artifacts.Configuration;
+import org.gradle.api.artifacts.ConfigurationContainer;
+import org.gradle.api.artifacts.dsl.DependencyHandler;
+import org.gradle.api.artifacts.repositories.IvyArtifactRepository;
+import org.gradle.api.file.CopySpec;
+import org.gradle.api.file.FileTree;
+import org.gradle.api.file.RelativePath;
+import org.gradle.api.tasks.Copy;
+import org.gradle.api.tasks.TaskProvider;
+
+import java.io.File;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.function.Supplier;
+import java.util.regex.Matcher;
+
+public class JdkDownloadPlugin implements Plugin<Project> {
+
+    @Override
+    public void apply(Project project) {
+        NamedDomainObjectContainer<Jdk> jdksContainer = project.container(Jdk.class, name ->
+            new Jdk(name, project)
+        );
+        project.getExtensions().add("jdks", jdksContainer);
+
+        project.afterEvaluate(p -> {
+            for (Jdk jdk : jdksContainer) {
+                jdk.finalizeValues();
+                String version = jdk.getVersion();
+                String platform = jdk.getPlatform();
+
+                // depend on the jdk directory "artifact" from the root project
+                DependencyHandler dependencies = project.getDependencies();
+                Map<String, Object> depConfig = new HashMap<>();
+                depConfig.put("path", ":"); // root project
+                depConfig.put("configuration", configName("extracted_jdk", version, platform));
+                dependencies.add(jdk.getConfiguration().getName(), dependencies.project(depConfig));
+
+                // ensure a root level jdk download task exists
+                setupRootJdkDownload(project.getRootProject(), platform, version);
+            }
+        });
+    }
+
+    private static void setupRootJdkDownload(Project rootProject, String platform, String version) {
+        String extractTaskName = "extract" + capitalize(platform) + "Jdk" + version;
+        // NOTE: this is *horrendous*, but seems to be the only way to check for the existence of a registered task
+        try {
+            rootProject.getTasks().named(extractTaskName);
+            // already set up this version
+            return;
+        } catch (UnknownTaskException e) {
+            // fall through: register the task
+        }
+
+        // decompose the bundled jdk version, broken into elements as: [feature, interim, update, build]
+        // Note the "patch" version is not yet handled here, as it has not yet been used by java.
+        Matcher jdkVersionMatcher = Jdk.VERSION_PATTERN.matcher(version);
+        if (jdkVersionMatcher.matches() == false) {
+            throw new IllegalArgumentException("Malformed jdk version [" + version + "]");
+        }
+        String jdkVersion = jdkVersionMatcher.group(1) + (jdkVersionMatcher.group(2) != null ? (jdkVersionMatcher.group(2)) : "");
+        String jdkMajor = jdkVersionMatcher.group(1);
+        String jdkBuild = jdkVersionMatcher.group(3);
+        String hash = jdkVersionMatcher.group(5);
+
+        // add fake ivy repo for jdk url
+        String repoName = "jdk_repo_" + version;
+        if (rootProject.getRepositories().findByName(repoName) == null) {
+            // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back
+            rootProject.getRepositories().ivy(ivyRepo -> {
+                ivyRepo.setName(repoName);
+                ivyRepo.setUrl("https://download.oracle.com");
+                ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact);
+                ivyRepo.patternLayout(layout ->
+                    layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]"));
+                ivyRepo.content(content -> content.includeGroup("jdk"));
+            });
+            // current pattern since 12.0.1
+            rootProject.getRepositories().ivy(ivyRepo -> {
+                ivyRepo.setName(repoName + "_with_hash");
+                ivyRepo.setUrl("https://download.oracle.com");
+                ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact);
+                ivyRepo.patternLayout(layout -> layout.artifact(
+                    "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]"));
+                ivyRepo.content(content -> content.includeGroup("jdk"));
+            });
+        }
+
+        // add the jdk as a "dependency"
+        final ConfigurationContainer configurations = rootProject.getConfigurations();
+        String remoteConfigName = configName("openjdk", version, platform);
+        String localConfigName = configName("extracted_jdk", version, platform);
+        Configuration jdkConfig = configurations.findByName(remoteConfigName);
+        if (jdkConfig == null) {
+            jdkConfig = configurations.create(remoteConfigName);
+            configurations.create(localConfigName);
+        }
+        String extension = platform.equals("windows") ? "zip" : "tar.gz";
+        String jdkDep = "jdk:" + (platform.equals("darwin") ? "osx" : platform) + ":" + jdkVersion + "@" + extension;
+        rootProject.getDependencies().add(configName("openjdk", version, platform), jdkDep);
+
+        // add task for extraction
+        // TODO: look into doing this as an artifact transform, which is cacheable starting in gradle 5.3
+        int rootNdx = platform.equals("darwin") ? 2 : 1;
+        Action<CopySpec> removeRootDir = copy -> {
+            // remove extra unnecessary directory levels
+            copy.eachFile(details -> {
+                String[] pathSegments = details.getRelativePath().getSegments();
+                String[] newPathSegments = Arrays.copyOfRange(pathSegments, rootNdx, pathSegments.length);
+                details.setRelativePath(new RelativePath(true, newPathSegments));
+            });
+            copy.setIncludeEmptyDirs(false);
+        };
+        // delay resolving jdkConfig until runtime
+        Supplier<File> jdkArchiveGetter = jdkConfig::getSingleFile;
+        final Callable<FileTree> fileGetter;
+        if (extension.equals("zip")) {
+            fileGetter = () -> rootProject.zipTree(jdkArchiveGetter.get());
+        } else {
+            fileGetter = () -> rootProject.tarTree(rootProject.getResources().gzip(jdkArchiveGetter.get()));
+        }
+        String extractDir = rootProject.getBuildDir().toPath().resolve("jdks/openjdk-" + jdkVersion + "_" + platform).toString();
+        TaskProvider<Copy> extractTask = rootProject.getTasks().register(extractTaskName, Copy.class, copyTask -> {
+            copyTask.doFirst(t -> rootProject.delete(extractDir));
+            copyTask.into(extractDir);
+            copyTask.from(fileGetter, removeRootDir);
+        });
+        rootProject.getArtifacts().add(localConfigName,
+            rootProject.getLayout().getProjectDirectory().dir(extractDir),
+            artifact -> artifact.builtBy(extractTask));
+    }
+
+    private static String configName(String prefix, String version, String platform) {
+        return prefix + "_" + version + "_" + platform;
+    }
+
+    private static String capitalize(String s) {
+        return s.substring(0, 1).toUpperCase(Locale.ROOT) + s.substring(1);
+    }
+}
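
For reference, the `jdks` container registered by JdkDownloadPlugin is consumed from a build script as sketched below (the version and task name are illustrative; the jdk-download testKit project later in this diff uses the same pattern):

    plugins {
        id 'elasticsearch.jdk-download'
    }

    jdks {
        linux {
            version = '11.0.2+33' // must match Jdk.VERSION_PATTERN: feature(.interim.update)+build
            platform = 'linux'    // one of: linux, windows, darwin
        }
    }

    // Jdk is Buildable: depending on it wires in the root-level download and extract tasks,
    // and its toString() resolves to the extracted JDK home directory
    task printJdkHome {
        dependsOn jdks.linux
        doLast {
            println "JDK HOME: " + jdks.linux
        }
    }
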
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java b/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java
index a8680ef13dd..3ac4a53910c 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/http/WaitForHttpResource.java
@@ -123,8 +123,7 @@ public class WaitForHttpResource {
             if (System.nanoTime() < waitUntil) {
                 Thread.sleep(sleep);
             } else {
-                logger.error("Failed to access url [{}]", url, failure);
-                return false;
+                throw failure;
             }
         }
     }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
index 48569ecd8b2..0cb7ee0c10f 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchCluster.java
@@ -22,23 +22,22 @@ import org.elasticsearch.GradleServicesAdapter;
 import org.elasticsearch.gradle.Distribution;
 import org.elasticsearch.gradle.FileSupplier;
 import org.elasticsearch.gradle.Version;
+import org.elasticsearch.gradle.http.WaitForHttpResource;
 import org.gradle.api.NamedDomainObjectContainer;
 import org.gradle.api.Project;
 import org.gradle.api.logging.Logger;
 import org.gradle.api.logging.Logging;

-import java.io.BufferedReader;
 import java.io.File;
 import java.io.IOException;
-import java.io.InputStreamReader;
 import java.io.UncheckedIOException;
-import java.net.HttpURLConnection;
 import java.net.URI;
-import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
+import java.security.GeneralSecurityException;
 import java.util.LinkedHashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -75,6 +74,8 @@ public class ElasticsearchCluster implements TestClusterConfiguration {
                 services, artifactsExtractDir, workingDirBase
             )
         );
+
+        addWaitForClusterHealth();
     }

     public void setNumberOfNodes(int numberOfNodes) {
@@ -219,6 +220,11 @@ public class ElasticsearchCluster implements TestClusterConfiguration {
         nodes.all(node -> node.extraConfigFile(destination, from));
     }

+    @Override
+    public void user(Map<String, String> userSpec) {
+        nodes.all(node -> node.user(userSpec));
+    }
+
     private void writeUnicastHostsFiles() {
         String unicastUris = nodes.stream().flatMap(node -> node.getAllTransportPortURI().stream()).collect(Collectors.joining("\n"));
         nodes.forEach(node -> {
@@ -262,9 +268,6 @@ public class ElasticsearchCluster implements TestClusterConfiguration {
         writeUnicastHostsFiles();

         LOGGER.info("Starting to wait for cluster to form");
-        addWaitForUri(
-            "cluster health yellow", "/_cluster/health?wait_for_nodes=>=" + nodes.size() + "&wait_for_status=yellow"
-        );
         waitForConditions(waitConditions, startedAt, CLUSTER_UP_TIMEOUT, CLUSTER_UP_TIMEOUT_UNIT, this);
     }

@@ -279,7 +282,9 @@ public class ElasticsearchCluster implements TestClusterConfiguration {
     }

     void eachVersionedDistribution(BiConsumer<String, Distribution> consumer) {
-        nodes.forEach(each -> consumer.accept(each.getVersion(), each.getDistribution()));
+        nodes.forEach(each -> {
+            consumer.accept(each.getVersion(), each.getDistribution());
+        });
     }

     public ElasticsearchNode singleNode() {
@@ -291,21 +296,25 @@ public class ElasticsearchCluster implements TestClusterConfiguration {
         return getFirstNode();
     }

-    private void addWaitForUri(String description, String uri) {
-        waitConditions.put(description, (node) -> {
+    private void addWaitForClusterHealth() {
+        waitConditions.put("cluster health yellow", (node) -> {
             try {
-                URL url = new URL("http://" + getFirstNode().getHttpSocketURI() + uri);
-                HttpURLConnection con = (HttpURLConnection) url.openConnection();
-                con.setRequestMethod("GET");
-                con.setConnectTimeout(500);
-                con.setReadTimeout(500);
-                try (BufferedReader reader = new BufferedReader(new InputStreamReader(con.getInputStream()))) {
-                    String response = reader.lines().collect(Collectors.joining("\n"));
-                    LOGGER.info("{} -> {} ->\n{}", this, uri, response);
-                }
-                return true;
+                WaitForHttpResource wait = new WaitForHttpResource(
+                    "http", getFirstNode().getHttpSocketURI(), nodes.size()
+                );
+                List<Map<String, String>> credentials = getFirstNode().getCredentials();
+                if (getFirstNode().getCredentials().isEmpty() == false) {
+                    wait.setUsername(credentials.get(0).get("useradd"));
+                    wait.setPassword(credentials.get(0).get("-p"));
+                }
+                return wait.wait(500);
             } catch (IOException e) {
                 throw new IllegalStateException("Connection attempt to " + this + " failed", e);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                throw new TestClustersException("Interrupted while waiting for " + this, e);
+            } catch (GeneralSecurityException e) {
+                throw new RuntimeException("security exception", e);
             }
         });
     }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
index b7ba4377a1a..3bb1fb2ddb6 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
@@ -38,6 +38,7 @@ import java.nio.file.Path;
 import java.nio.file.StandardCopyOption;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
@@ -86,6 +87,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {
     private final Map<String, Supplier<CharSequence>> environment = new LinkedHashMap<>();
     private final Map<String, File> extraConfigFiles = new HashMap<>();
     final LinkedHashMap<String, String> defaultConfig = new LinkedHashMap<>();
+    private final List<Map<String, String>> credentials = new ArrayList<>();

     private final Path confPathRepo;
     private final Path configFile;
@@ -117,8 +119,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {
         esStdoutFile = confPathLogs.resolve("es.stdout.log");
         esStderrFile = confPathLogs.resolve("es.stderr.log");
         tmpDir = workingDir.resolve("tmp");
-        waitConditions.put("http ports file", node -> Files.exists(((ElasticsearchNode) node).httpPortsFile));
-        waitConditions.put("transport ports file", node -> Files.exists(((ElasticsearchNode) node).transportPortFile));
+        waitConditions.put("ports files", this::checkPortsFilesExistWithDelay);
     }

     public String getName() {
@@ -276,7 +277,7 @@ public class ElasticsearchNode implements TestClusterConfiguration {

         Path distroArtifact = artifactsExtractDir
             .resolve(distribution.getGroup())
-            .resolve(distribution.getArtifactName() + "-" + getVersion());
+            .resolve("elasticsearch-" + getVersion());

         if (Files.exists(distroArtifact) == false) {
             throw new TestClustersException("Can not start " + this + ", missing: " + distroArtifact);
@@ -319,9 +320,25 @@ public class ElasticsearchNode implements TestClusterConfiguration {

         copyExtraConfigFiles();

+        if (isSettingMissingOrTrue("xpack.security.enabled")) {
+            if (credentials.isEmpty()) {
+                user(Collections.emptyMap());
+            }
+            credentials.forEach(paramMap -> runElaticsearchBinScript(
+                "elasticsearch-users",
+                paramMap.entrySet().stream()
+                    .flatMap(entry -> Stream.of(entry.getKey(), entry.getValue()))
+                    .toArray(String[]::new)
+            ));
+        }
+
         startElasticsearchProcess();
     }

+    private boolean isSettingMissingOrTrue(String name) {
+        return Boolean.valueOf(settings.getOrDefault(name, () -> "false").get().toString());
+    }
+
     private void copyExtraConfigFiles() {
         extraConfigFiles.forEach((destination, from) -> {
             if (Files.exists(from.toPath()) == false) {
@@ -375,6 +392,22 @@ public class ElasticsearchNode implements TestClusterConfiguration {
         extraConfigFiles.put(destination, from);
     }

+    @Override
+    public void user(Map<String, String> userSpec) {
+        Set<String> keys = new HashSet<>(userSpec.keySet());
+        keys.remove("username");
+        keys.remove("password");
+        keys.remove("role");
+        if (keys.isEmpty() == false) {
+            throw new TestClustersException("Unknown keys in user definition " + keys + " for " + this);
+        }
+        Map<String, String> cred = new LinkedHashMap<>();
+        cred.put("useradd", userSpec.getOrDefault("username", "test_user"));
+        cred.put("-p", userSpec.getOrDefault("password", "x-pack-test-password"));
+        cred.put("-r", userSpec.getOrDefault("role", "superuser"));
+        credentials.add(cred);
+    }
+
     private void runElaticsearchBinScriptWithInput(String input, String tool, String... args) {
         try (InputStream byteArrayInputStream = new ByteArrayInputStream(input.getBytes(StandardCharsets.UTF_8))) {
             services.loggedExec(spec -> {
@@ -752,4 +785,21 @@ public class ElasticsearchNode implements TestClusterConfiguration {
     public String toString() {
         return "node{" + path + ":" + name + "}";
     }
+
+    List<Map<String, String>> getCredentials() {
+        return credentials;
+    }
+
+    private boolean checkPortsFilesExistWithDelay(TestClusterConfiguration node) {
+        if (Files.exists(httpPortsFile) && Files.exists(transportPortFile)) {
+            return true;
+        }
+        try {
+            Thread.sleep(500);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new TestClustersException("Interrupted while waiting for ports files", e);
+        }
+        return Files.exists(httpPortsFile) && Files.exists(transportPortFile);
+    }
 }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java
index 39f9683ff48..628dadcbb9d 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClusterConfiguration.java
@@ -27,6 +27,7 @@ import java.io.File;
 import java.net.URI;
 import java.util.LinkedHashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Predicate;
 import java.util.function.Supplier;
@@ -72,6 +73,8 @@ public interface TestClusterConfiguration {

     void extraConfigFile(String destination, File from);

+    void user(Map<String, String> userSpec);
+
     String getHttpSocketURI();

     String getTransportPortURI();
@@ -108,7 +111,7 @@ public interface TestClusterConfiguration {
                     break;
                 }
             } catch (TestClustersException e) {
-                throw new TestClustersException(e);
+                throw e;
             } catch (Exception e) {
                 if (lastException == null) {
                     lastException = e;
@@ -116,12 +119,6 @@ public interface TestClusterConfiguration {
                     lastException = e;
                 }
             }
-            try {
-                Thread.sleep(500);
-            }
-            catch (InterruptedException e) {
-                Thread.currentThread().interrupt();
-            }
         }
         if (conditionMet == false) {
             String message = "`" + context + "` failed to wait for " + description + " after " +
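
The new `user()` method gives the testclusters DSL a way to declare credentials when security is enabled; if no user is declared, a default `test_user`/`x-pack-test-password` superuser is created at startup. A hedged sketch of the resulting build-script usage (the cluster name is hypothetical, and the `setting` call assumes the pre-existing testclusters DSL):

    testClusters {
        securedCluster {
            setting 'xpack.security.enabled', 'true'
            // runs: elasticsearch-users useradd admin_user -p admin-password -r superuser
            user username: 'admin_user', password: 'admin-password', role: 'superuser'
        }
    }
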
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
index e9586f4c4ba..c1ed6b770f0 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
@@ -20,17 +20,18 @@ package org.elasticsearch.gradle.testclusters;

 import groovy.lang.Closure;
 import org.elasticsearch.gradle.BwcVersions;
-import org.elasticsearch.gradle.Distribution;
 import org.elasticsearch.gradle.Version;
+import org.elasticsearch.gradle.tool.Boilerplate;
 import org.gradle.api.Action;
 import org.gradle.api.NamedDomainObjectContainer;
 import org.gradle.api.Plugin;
 import org.gradle.api.Project;
 import org.gradle.api.Task;
 import org.gradle.api.artifacts.Configuration;
+import org.gradle.api.artifacts.repositories.MavenArtifactRepository;
+import org.gradle.api.credentials.HttpHeaderCredentials;
 import org.gradle.api.execution.TaskActionListener;
 import org.gradle.api.execution.TaskExecutionListener;
-import org.gradle.api.file.FileCollection;
 import org.gradle.api.file.FileTree;
 import org.gradle.api.logging.Logger;
 import org.gradle.api.logging.Logging;
@@ -46,7 +47,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -56,7 +56,7 @@ public class TestClustersPlugin implements Plugin<Project> {

     private static final String LIST_TASK_NAME = "listTestClusters";
     private static final String NODE_EXTENSION_NAME = "testClusters";
-    private static final String HELPER_CONFIGURATION_NAME = "testclusters";
+    private static final String HELPER_CONFIGURATION_PREFIX = "testclusters";
     private static final String SYNC_ARTIFACTS_TASK_NAME = "syncTestClustersArtifacts";
     private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1;
     private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES;
@@ -69,6 +69,10 @@ public class TestClustersPlugin implements Plugin<Project> {
     private final Thread shutdownHook = new Thread(this::shutDownAllClusters);
     private ExecutorService executorService = Executors.newSingleThreadExecutor();

+    public static String getHelperConfigurationName(String version) {
+        return HELPER_CONFIGURATION_PREFIX + "-" + version;
+    }
+
     @Override
     public void apply(Project project) {
         Project rootProject = project.getRootProject();
@@ -82,47 +86,6 @@ public class TestClustersPlugin implements Plugin<Project> {
         // create DSL for tasks to mark clusters these use
         createUseClusterTaskExtension(project, container);

-        if (rootProject.getConfigurations().findByName(HELPER_CONFIGURATION_NAME) == null) {
-            // We use a single configuration on the root project to resolve all testcluster dependencies ( like distros )
-            // at once, only once without the need to repeat it for each project. This pays off assuming that most
-            // projects use the same dependencies.
-            Configuration helperConfiguration = project.getRootProject().getConfigurations().create(HELPER_CONFIGURATION_NAME);
-            helperConfiguration.setDescription(
-                "Internal helper configuration used by cluster configuration to download " +
-                    "ES distributions and plugins."
-            );
-
-            // We have a single task to sync the helper configuration to "artifacts dir"
-            // the clusters will look for artifacts there based on the naming conventions.
-            // Tasks that use a cluster will add this as a dependency automatically so it's guaranteed to run early in
-            // the build.
-            rootProject.getTasks().create(SYNC_ARTIFACTS_TASK_NAME, sync -> {
-                sync.getInputs().files((Callable<FileTree>) helperConfiguration::getAsFileTree);
-                sync.getOutputs().dir(new File(project.getRootProject().getBuildDir(), "testclusters/extract"));
-                // NOTE: Gradle doesn't allow a lambda here ( fails at runtime )
-                sync.doLast(new Action<Task>() {
-                    @Override
-                    public void execute(Task task) {
-                        project.sync(spec ->
-                            helperConfiguration.getResolvedConfiguration().getResolvedArtifacts().forEach(resolvedArtifact -> {
-                                final FileTree files;
-                                File file = resolvedArtifact.getFile();
-                                if (file.getName().endsWith(".zip")) {
-                                    files = project.zipTree(file);
-                                } else if (file.getName().endsWith("tar.gz")) {
-                                    files = project.tarTree(file);
-                                } else {
-                                    throw new IllegalArgumentException("Can't extract " + file + " unknown file extension");
-                                }
-                                spec.from(files).into(new File(project.getRootProject().getBuildDir(), "testclusters/extract") + "/" +
-                                    resolvedArtifact.getModuleVersion().getId().getGroup()
-                                );
-                            }));
-                    }
-                });
-            });
-        }
-
         // When we know what tasks will run, we claim the clusters of those task to differentiate between clusters
         // that are defined in the build script and the ones that will actually be used in this invocation of gradle
         // we use this information to determine when the last task that required the cluster executed so that we can
@@ -143,6 +106,10 @@ public class TestClustersPlugin implements Plugin<Project> {
         autoConfigureClusterDependencies(project, rootProject, container);
     }

+    private static File getExtractDir(Project project) {
+        return new File(project.getRootProject().getBuildDir(), "testclusters/extract/");
+    }
+
     private NamedDomainObjectContainer<ElasticsearchCluster> createTestClustersContainerExtension(Project project) {
         // Create an extensions that allows describing clusters
         NamedDomainObjectContainer<ElasticsearchCluster> container = project.container(
@@ -290,12 +257,59 @@ public class TestClustersPlugin implements Plugin<Project> {
         Project rootProject,
         NamedDomainObjectContainer<ElasticsearchCluster> container
     ) {
+        // Download integ test distribution from maven central
+        MavenArtifactRepository mavenCentral = project.getRepositories().mavenCentral();
+        mavenCentral.content(spec -> {
+            spec.includeGroupByRegex("org\\.elasticsearch\\.distribution\\..*");
+        });
+
+        // Other distributions from the download service
+        project.getRepositories().add(
+            project.getRepositories().ivy(spec -> {
+                spec.setUrl("https://artifacts.elastic.co/downloads");
+                spec.patternLayout(p -> p.artifact("elasticsearch/[module]-[revision](-[classifier]).[ext]"));
+                HttpHeaderCredentials headerConfig = spec.getCredentials(HttpHeaderCredentials.class);
+                headerConfig.setName("X-Elastic-No-KPI");
+                headerConfig.setValue("1");
+                spec.content(c -> c.includeGroupByRegex("org\\.elasticsearch\\.distribution\\..*"));
+            })
+        );
+
+        // We have a single task to sync the helper configuration to "artifacts dir"
+        // the clusters will look for artifacts there based on the naming conventions.
+        // Tasks that use a cluster will add this as a dependency automatically so it's guaranteed to run early in
+        // the build.
+        Task sync = Boilerplate.maybeCreate(rootProject.getTasks(), SYNC_ARTIFACTS_TASK_NAME, onCreate -> {
+            onCreate.getOutputs().dir(getExtractDir(rootProject));
+            // NOTE: Gradle doesn't allow a lambda here ( fails at runtime )
+            onCreate.doFirst(new Action<Task>() {
+                @Override
+                public void execute(Task task) {
+                    // Clean up the extract dir first to make sure we have no stale files from
+                    // previous builds of the same distribution
+                    project.delete(getExtractDir(rootProject));
+                }
+            });
+        });
+
         // When the project evaluated we know of all tasks that use clusters.
         // Each of these have to depend on the artifacts being synced.
         // We need afterEvaluate here despite the fact that container is a domain object, we can't implement this with
         // all because fields can change after the fact.
         project.afterEvaluate(ip -> container.forEach(esCluster ->
             esCluster.eachVersionedDistribution((version, distribution) -> {
+                Configuration helperConfiguration = Boilerplate.maybeCreate(
+                    rootProject.getConfigurations(),
+                    getHelperConfigurationName(version),
+                    onCreate ->
+                        // We use a single configuration on the root project to resolve all testcluster dependencies ( like distros )
+                        // at once, only once without the need to repeat it for each project. This pays off assuming that most
+                        // projects use the same dependencies.
+                        onCreate.setDescription(
+                            "Internal helper configuration used by cluster configuration to download " +
+                                "ES distributions and plugins for " + version
+                        )
+                );
                 BwcVersions.UnreleasedVersionInfo unreleasedInfo;
                 final List<Version> unreleased;
                 {
@@ -320,29 +334,42 @@ public class TestClustersPlugin implements Plugin<Project> {
                     projectNotation.put("path", unreleasedInfo.gradleProjectPath);
                     projectNotation.put("configuration", distribution.getLiveConfiguration());
                     rootProject.getDependencies().add(
-                        HELPER_CONFIGURATION_NAME,
+                        helperConfiguration.getName(),
                         project.getDependencies().project(projectNotation)
                     );
                 } else {
-                    if (distribution.equals(Distribution.INTEG_TEST)) {
-                        rootProject.getDependencies().add(
-                            HELPER_CONFIGURATION_NAME, "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + version
-                        );
-                    } else {
-                        // declare dependencies to be downloaded from the download service.
-                        // The BuildPlugin sets up the right repo for this to work
-                        // TODO: move the repo definition in this plugin when ClusterFormationTasks is removed
-                        String dependency = String.format(
-                            "%s:%s:%s:%s@%s",
-                            distribution.getGroup(),
-                            distribution.getArtifactName(),
-                            version,
-                            distribution.getClassifier(),
-                            distribution.getFileExtension()
-                        );
-                        rootProject.getDependencies().add(HELPER_CONFIGURATION_NAME, dependency);
-                    }
+                    rootProject.getDependencies().add(
+                        helperConfiguration.getName(),
+                        distribution.getGroup() + ":" +
+                            distribution.getArtifactName() + ":" +
+                            version +
+                            (distribution.getClassifier().isEmpty() ? "" : ":" + distribution.getClassifier()) + "@" +
+                            distribution.getFileExtension());
                 }
+
+                sync.getInputs().files(helperConfiguration);
+                // NOTE: Gradle doesn't allow a lambda here ( fails at runtime )
+                sync.doLast(new Action<Task>() {
+                    @Override
+                    public void execute(Task task) {
+                        project.copy(spec ->
+                            helperConfiguration.getResolvedConfiguration().getResolvedArtifacts().forEach(resolvedArtifact -> {
+                                final FileTree files;
+                                File file = resolvedArtifact.getFile();
+                                if (file.getName().endsWith(".zip")) {
+                                    files = project.zipTree(file);
+                                } else if (file.getName().endsWith("tar.gz")) {
+                                    files = project.tarTree(file);
+                                } else {
+                                    throw new IllegalArgumentException("Can't extract " + file + " unknown file extension");
+                                }
+
+                                spec.from(files, s -> s.into(resolvedArtifact.getModuleVersion().getId().getGroup()));
+                                spec.into(getExtractDir(project));
+                            }));
+                    }
+                });
             })));
     }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java
index 67fc7473a4c..29b0b5def20 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java
@@ -18,14 +18,33 @@
  */
 package org.elasticsearch.gradle.tool;

+import org.gradle.api.Action;
+import org.gradle.api.NamedDomainObjectContainer;
 import org.gradle.api.Project;
 import org.gradle.api.plugins.JavaPluginConvention;
 import org.gradle.api.tasks.SourceSetContainer;

+import java.util.Optional;
+
 public abstract class Boilerplate {

     public static SourceSetContainer getJavaSourceSets(Project project) {
         return project.getConvention().getPlugin(JavaPluginConvention.class).getSourceSets();
     }

+    public static <T> T maybeCreate(NamedDomainObjectContainer<T> collection, String name) {
+        return Optional.ofNullable(collection.findByName(name))
+            .orElseGet(() -> collection.create(name));
+
+    }
+
+    public static <T> T maybeCreate(NamedDomainObjectContainer<T> collection, String name, Action<T> action) {
+        return Optional.ofNullable(collection.findByName(name))
+            .orElseGet(() -> {
+                T result = collection.create(name);
+                action.execute(result);
+                return result;
+            });
+
+    }
+
 }
diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.jdk-download.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.jdk-download.properties
new file mode 100644
index 00000000000..7568724a32a
--- /dev/null
+++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.jdk-download.properties
@@ -0,0 +1 @@
+implementation-class=org.elasticsearch.gradle.JdkDownloadPlugin
\ No newline at end of file
diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java
index 4b52d47937f..762bcc5ff9b 100644
--- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java
@@ -99,6 +99,7 @@ public class BuildExamplePluginsIT extends GradleIntegrationTestCase {
             "buildscript {\n" +
                 "    repositories {\n" +
                 "        maven {\n" +
+                "            name = \"test\"\n" +
                 "            url = '" + getLocalTestRepoPath() + "'\n" +
                 "        }\n" +
                 "    }\n" +
@@ -117,12 +118,14 @@ public class BuildExamplePluginsIT extends GradleIntegrationTestCase {
         String luceneSnapshotRevision = System.getProperty("test.lucene-snapshot-revision");
         if (luceneSnapshotRepo != null) {
             luceneSnapshotRepo = "  maven {\n" +
-                "    url \"http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision + "\"\n" +
+                "    name \"lucene-snapshots\"\n" +
+                "    url \"https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision + "\"\n" +
                 "  }\n";
         }
         writeBuildScript("\n" +
             "repositories {\n" +
             "  maven {\n" +
+            "    name \"test\"\n" +
             "    url \"" + getLocalTestRepoPath() + "\"\n" +
             "  }\n" +
             "  flatDir {\n" +
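
The `Boilerplate.maybeCreate` helper added above (used for both the sync task and the per-version helper configurations) is a create-or-get utility; the `Action` overload only configures an element it actually creates. An illustrative sketch of how it reads from buildSrc code (the configuration name and description are made up):

    import org.elasticsearch.gradle.tool.Boilerplate

    // first call creates and configures; subsequent calls return the existing instance unchanged
    Configuration helper = Boilerplate.maybeCreate(project.configurations, 'testclusters-7.0.0') { c ->
        c.description = 'Internal helper configuration for downloading ES distributions (7.0.0)'
    }
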
diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java
new file mode 100644
index 00000000000..5f982e1b47d
--- /dev/null
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle;
+
+import com.github.tomakehurst.wiremock.WireMockServer;
+import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
+import org.gradle.testkit.runner.BuildResult;
+import org.gradle.testkit.runner.GradleRunner;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.function.Consumer;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
+import static com.github.tomakehurst.wiremock.client.WireMock.get;
+import static com.github.tomakehurst.wiremock.client.WireMock.head;
+import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class JdkDownloadPluginIT extends GradleIntegrationTestCase {
+
+    private static final String FAKE_JDK_VERSION = "1.0.2+99";
+    private static final Pattern JDK_HOME_LOGLINE = Pattern.compile("JDK HOME: (.*)");
+    private static final Pattern NUM_CONFIGS_LOGLINE = Pattern.compile("NUM CONFIGS: (.*)");
+
+    public void testLinuxExtraction() throws IOException {
+        assertExtraction("getLinuxJdk", "linux", "bin/java");
+    }
+
+    public void testDarwinExtraction() throws IOException {
+        assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java");
+    }
+
+    public void testWindowsExtraction() throws IOException {
+        assertExtraction("getWindowsJdk", "windows", "bin/java");
+    }
+
+    public void testCrossProjectReuse() throws IOException {
+        runBuild("numConfigurations", "linux", result -> {
+            Matcher matcher = NUM_CONFIGS_LOGLINE.matcher(result.getOutput());
+            assertTrue("could not find num configs in output: " + result.getOutput(), matcher.find());
+            assertThat(Integer.parseInt(matcher.group(1)), equalTo(6)); // 3 import configs, 3 export configs
+        });
+    }
+
+    public void assertExtraction(String taskname, String platform, String javaBin) throws IOException {
+        runBuild(taskname, platform, result -> {
+            Matcher matcher = JDK_HOME_LOGLINE.matcher(result.getOutput());
+            assertTrue("could not find jdk home in output: " + result.getOutput(), matcher.find());
+            String jdkHome = matcher.group(1);
+            Path javaPath = Paths.get(jdkHome, javaBin);
+            assertTrue(javaPath.toString(), Files.exists(javaPath));
+        });
+    }
+
+    private void runBuild(String taskname, String platform, Consumer<BuildResult> assertions) throws IOException {
+        WireMockServer wireMock = new WireMockServer(0);
+        try {
+            String extension = platform.equals("windows") ? "zip" : "tar.gz";
+            String filename = "openjdk-1.0.2_" + platform + "-x64_bin." + extension;
+            wireMock.stubFor(head(urlEqualTo("/java/GA/jdk1/99/GPL/" + filename))
+                .willReturn(aResponse().withStatus(200)));
+            final byte[] filebytes;
+            try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream(filename)) {
+                filebytes = stream.readAllBytes();
+            }
+            wireMock.stubFor(get(urlEqualTo("/java/GA/jdk1/99/GPL/" + filename))
+                .willReturn(aResponse().withStatus(200).withBody(filebytes)));
+            wireMock.start();
+
+            GradleRunner runner = GradleRunner.create().withProjectDir(getProjectDir("jdk-download"))
+                .withArguments(taskname,
+                    "-Dlocal.repo.path=" + getLocalTestRepoPath(),
+                    "-Dtests.jdk_version=" + FAKE_JDK_VERSION,
+                    "-Dtests.jdk_repo=" + wireMock.baseUrl())
+                .withPluginClasspath();
+
+            BuildResult result = runner.build();
+            assertions.accept(result);
+        } catch (Exception e) {
+            // for debugging
+            System.err.println("missed requests: " + wireMock.findUnmatchedRequests().getRequests());
+            throw e;
+        } finally {
+            wireMock.stop();
+        }
+    }
+}
diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java
new file mode 100644
index 00000000000..c6ca817e759
--- /dev/null
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle;
+
+import org.elasticsearch.gradle.test.GradleUnitTestCase;
+import org.gradle.api.NamedDomainObjectContainer;
+import org.gradle.api.Project;
+import org.gradle.testfixtures.ProjectBuilder;
+import org.junit.BeforeClass;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class JdkDownloadPluginTests extends GradleUnitTestCase {
+    private static Project rootProject;
+
+    @BeforeClass
+    public static void setupRoot() {
+        rootProject = ProjectBuilder.builder().build();
+    }
+
+    public void testMissingVersion() {
+        assertJdkError(createProject(), "testjdk", null, "linux", "version not specified for jdk [testjdk]");
+    }
+
+    public void testMissingPlatform() {
+        assertJdkError(createProject(), "testjdk", "11.0.2+33", null, "platform not specified for jdk [testjdk]");
+    }
+
+    public void testUnknownPlatform() {
+        assertJdkError(createProject(), "testjdk", "11.0.2+33", "unknown",
+            "unknown platform [unknown] for jdk [testjdk], must be one of [linux, windows, darwin]");
+    }
+
+    public void testBadVersionFormat() {
+        assertJdkError(createProject(), "testjdk", "badversion", "linux", "malformed version [badversion] for jdk [testjdk]");
+    }
+
+    private void assertJdkError(Project project, String name, String version, String platform, String message) {
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createJdk(project, name, version, platform));
+        assertThat(e.getMessage(), equalTo(message));
+    }
+
+    private void createJdk(Project project, String name, String version, String platform) {
+        @SuppressWarnings("unchecked")
+        NamedDomainObjectContainer<Jdk> jdks = (NamedDomainObjectContainer<Jdk>) project.getExtensions().getByName("jdks");
+        jdks.create(name, jdk -> {
+            if (version != null) {
+                jdk.setVersion(version);
+            }
+            if (platform != null) {
+                jdk.setPlatform(platform);
+            }
+        }).finalizeValues();
+    }
+
+    private Project createProject() {
+        Project project = ProjectBuilder.builder().withParent(rootProject).build();
+        project.getPlugins().apply("elasticsearch.jdk-download");
+        return project;
+    }
+}
diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java
index c3262ee1e26..0fc26f0284c 100644
--- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/BaseTestCase.java
@@ -22,6 +22,7 @@ import com.carrotsearch.randomizedtesting.JUnit4MethodProvider;
 import com.carrotsearch.randomizedtesting.RandomizedRunner;
 import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
+import junit.framework.AssertionFailedError;
 import org.junit.Assert;
 import org.junit.runner.RunWith;

@@ -32,4 +33,24 @@ import org.junit.runner.RunWith;
 })
 @ThreadLeakLingering(linger = 5000) // wait for "Connection worker" to die
 public abstract class BaseTestCase extends Assert {
+
+    // add expectThrows from junit 5
+    @FunctionalInterface
+    public interface ThrowingRunnable {
+        void run() throws Throwable;
+    }
+
+    public static <T extends Throwable> T expectThrows(Class<T> expectedType, ThrowingRunnable runnable) {
+        try {
+            runnable.run();
+        } catch (Throwable e) {
+            if (expectedType.isInstance(e)) {
+                return expectedType.cast(e);
+            }
+            AssertionFailedError assertion = new AssertionFailedError(
+                "Unexpected exception type, expected " + expectedType.getSimpleName() + " but got " + e);
+            assertion.initCause(e);
+            throw assertion;
+        }
+        throw new AssertionFailedError("Expected exception " + expectedType.getSimpleName() + " but no exception was thrown");
+    }
 }
diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
index 7e2915e2341..9276e8d215c 100644
--- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
@@ -103,6 +103,14 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
         );
     }

+    public void testReleased() {
+        BuildResult result = getTestClustersRunner("testReleased").build();
+        assertTaskSuccessful(result, ":testReleased");
+        assertStartedAndStoppedOnce(result, "releasedVersionDefault-1");
+        assertStartedAndStoppedOnce(result, "releasedVersionOSS-1");
+        assertStartedAndStoppedOnce(result, "releasedVersionIntegTest-1");
+    }
+
     public void testIncremental() {
         BuildResult result = getTestClustersRunner("clean", ":user1").build();
         assertTaskSuccessful(result, ":user1");
diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_linux-x64_bin.tar.gz b/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_linux-x64_bin.tar.gz
new file mode 100644
index 00000000000..d38b03a4c2a
Binary files /dev/null and b/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_linux-x64_bin.tar.gz differ
diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_osx-x64_bin.tar.gz b/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_osx-x64_bin.tar.gz
new file mode 100644
index 00000000000..9ac1da5e181
Binary files /dev/null and b/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_osx-x64_bin.tar.gz differ
diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_windows-x64_bin.zip b/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_windows-x64_bin.zip
new file mode 100644
index 00000000000..61b6b867397
Binary files /dev/null and b/buildSrc/src/test/resources/org/elasticsearch/gradle/openjdk-1.0.2_windows-x64_bin.zip differ
diff --git a/buildSrc/src/testKit/elasticsearch.build/build.gradle b/buildSrc/src/testKit/elasticsearch.build/build.gradle
index 8020935f67e..7a68fe59baa 100644
--- a/buildSrc/src/testKit/elasticsearch.build/build.gradle
+++ b/buildSrc/src/testKit/elasticsearch.build/build.gradle
@@ -16,6 +16,7 @@ repositories {
   jcenter()
   repositories {
     maven {
+      name "local-repo"
      url System.getProperty("local.repo.path")
    }
  }
diff --git a/buildSrc/src/testKit/jarHell/build.gradle b/buildSrc/src/testKit/jarHell/build.gradle
index cd423c9f99f..cb12ce03f51 100644
--- a/buildSrc/src/testKit/jarHell/build.gradle
+++ b/buildSrc/src/testKit/jarHell/build.gradle
@@ -15,6 +15,7 @@ repositories {
   jcenter()
   repositories {
     maven {
+      name "local"
      url System.getProperty("local.repo.path")
    }
  }
+ IvyArtifactRepository repository = (IvyArtifactRepository) rootProject.repositories.getByName("jdk_repo_${fakeJdkVersion}") + repository.setUrl(fakeJdkRepo) +} + +task numConfigurations { + doLast { + println "NUM CONFIGS: ${project.configurations.size()}" + } +} \ No newline at end of file diff --git a/buildSrc/src/testKit/jdk-download/reuse/build.gradle b/buildSrc/src/testKit/jdk-download/reuse/build.gradle new file mode 100644 index 00000000000..8a26a8121e9 --- /dev/null +++ b/buildSrc/src/testKit/jdk-download/reuse/build.gradle @@ -0,0 +1,9 @@ +evaluationDependsOn ':subproj' + +String fakeJdkVersion = Objects.requireNonNull(System.getProperty('tests.jdk_version')) +jdks { + linux_jdk { + version = fakeJdkVersion + platform = "linux" + } +} \ No newline at end of file diff --git a/buildSrc/src/testKit/jdk-download/settings.gradle b/buildSrc/src/testKit/jdk-download/settings.gradle new file mode 100644 index 00000000000..028de479afe --- /dev/null +++ b/buildSrc/src/testKit/jdk-download/settings.gradle @@ -0,0 +1 @@ +include 'subproj' \ No newline at end of file diff --git a/buildSrc/src/testKit/jdk-download/subproj/build.gradle b/buildSrc/src/testKit/jdk-download/subproj/build.gradle new file mode 100644 index 00000000000..8e8b5435b4a --- /dev/null +++ b/buildSrc/src/testKit/jdk-download/subproj/build.gradle @@ -0,0 +1,41 @@ +plugins { + id 'elasticsearch.jdk-download' +} + + +String fakeJdkVersion = Objects.requireNonNull(System.getProperty('tests.jdk_version')) +jdks { + linux { + version = fakeJdkVersion + platform = "linux" + } + darwin { + version = fakeJdkVersion + platform = "darwin" + } + windows { + version = fakeJdkVersion + platform = "windows" + } +} + +task getLinuxJdk { + dependsOn jdks.linux + doLast { + println "JDK HOME: " + jdks.linux + } +} + +task getDarwinJdk { + dependsOn jdks.darwin + doLast { + println "JDK HOME: " + jdks.darwin + } +} + +task getWindowsJdk { + dependsOn jdks.windows + doLast { + println "JDK HOME: " + jdks.windows + } +} \ No newline at end of file diff --git a/buildSrc/src/testKit/testclusters/build.gradle b/buildSrc/src/testKit/testclusters/build.gradle index f82013bbc73..e4f912a3d7a 100644 --- a/buildSrc/src/testKit/testclusters/build.gradle +++ b/buildSrc/src/testKit/testclusters/build.gradle @@ -9,16 +9,16 @@ allprojects { all -> dir System.getProperty("test.local-test-downloads-path") } maven { + name "local" url System.getProperty("local.repo.path") } String luceneSnapshotRevision = System.getProperty("test.lucene-snapshot-revision") if (luceneSnapshotRevision != null) { maven { - url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision + name "lucene-snapshots" + url "https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/" + luceneSnapshotRevision } } - - jcenter() } if (project == rootProject || project.name == "alpha" || project.name == "bravo") { @@ -58,6 +58,21 @@ testClusters { javaHome = file(System.getProperty('java.home')) numberOfNodes = 3 } + releasedVersionDefault { + version = "7.0.0" + distribution = 'DEFAULT' + javaHome = file(System.getProperty('java.home')) + } + releasedVersionOSS { + version = "7.0.0" + distribution = 'OSS' + javaHome = file(System.getProperty('java.home')) + } + releasedVersionIntegTest { + version = "7.0.0" + distribution = 'INTEG_TEST' + javaHome = file(System.getProperty('java.home')) + } } task multiNode { @@ -67,6 +82,17 @@ task multiNode { } } +task testReleased { + useCluster testClusters.releasedVersionDefault + useCluster 
testClusters.releasedVersionOSS + useCluster testClusters.releasedVersionIntegTest + doFirst { + println "$path: Cluster running @ ${testClusters.releasedVersionDefault.httpSocketURI}" + println "$path: Cluster running @ ${testClusters.releasedVersionOSS.httpSocketURI}" + println "$path: Cluster running @ ${testClusters.releasedVersionIntegTest.httpSocketURI}" + } +} + task printLog { useCluster testClusters.myTestCluster doFirst { diff --git a/buildSrc/src/testKit/thirdPartyAudit/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/build.gradle index 42e0a22ccea..725be970fd9 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/build.gradle @@ -14,6 +14,7 @@ repositories { * - version 0.0.2 has the same class and one extra file just to make the jar different */ maven { + name = "local-test" url = file("sample_jars/build/testrepo") } jcenter() diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java index 309a37fedf8..0e887994da7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java @@ -23,6 +23,7 @@ import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; @@ -57,11 +58,11 @@ final class DataFrameRequestConverters { .addPathPart(Strings.collectionToCommaDelimitedString(getRequest.getId())) .build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); - if (getRequest.getFrom() != null) { - request.addParameter("from", getRequest.getFrom().toString()); + if (getRequest.getPageParams() != null && getRequest.getPageParams().getFrom() != null) { + request.addParameter(PageParams.FROM.getPreferredName(), getRequest.getPageParams().getFrom().toString()); } - if (getRequest.getSize() != null) { - request.addParameter("size", getRequest.getSize().toString()); + if (getRequest.getPageParams() != null && getRequest.getPageParams().getSize() != null) { + request.addParameter(PageParams.SIZE.getPreferredName(), getRequest.getPageParams().getSize().toString()); } return request; } @@ -120,6 +121,13 @@ final class DataFrameRequestConverters { .addPathPart(statsRequest.getId()) .addPathPartAsIs("_stats") .build(); - return new Request(HttpGet.METHOD_NAME, endpoint); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + if (statsRequest.getPageParams() != null && statsRequest.getPageParams().getFrom() != null) { + request.addParameter(PageParams.FROM.getPreferredName(), statsRequest.getPageParams().getFrom().toString()); + } + if (statsRequest.getPageParams() != null && statsRequest.getPageParams().getSize() != null) { + request.addParameter(PageParams.SIZE.getPreferredName(), statsRequest.getPageParams().getSize().toString()); + } + return request; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java 
index 073b92f84a3..99884ee49c8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -27,6 +27,7 @@ import org.apache.http.client.methods.HttpPut; import org.apache.http.nio.entity.NByteArrayEntity; import org.apache.lucene.util.BytesRef; import org.elasticsearch.client.RequestConverters.EndpointBuilder; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.DeleteCalendarEventRequest; import org.elasticsearch.client.ml.DeleteCalendarJobRequest; @@ -71,7 +72,6 @@ import org.elasticsearch.client.ml.UpdateDatafeedRequest; import org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; import org.elasticsearch.client.ml.UpdateModelSnapshotRequest; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentType; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/PageParams.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/PageParams.java similarity index 96% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/PageParams.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/core/PageParams.java index 52d54188f70..64b42c4ef7b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/util/PageParams.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/PageParams.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.client.ml.job.util; +package org.elasticsearch.client.core; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -57,11 +57,11 @@ public class PageParams implements ToXContentObject { this.size = size; } - public int getFrom() { + public Integer getFrom() { return from; } - public int getSize() { + public Integer getSize() { return size; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java index 9577a0f5c72..c50f37a27c8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java @@ -21,6 +21,7 @@ package org.elasticsearch.client.dataframe; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; +import org.elasticsearch.client.core.PageParams; import java.util.Arrays; import java.util.List; @@ -29,10 +30,6 @@ import java.util.Optional; public class GetDataFrameTransformRequest implements Validatable { - private final List ids; - private Integer from; - private Integer size; - /** * Helper method to create a request that will get ALL Data Frame Transforms * @return new {@link GetDataFrameTransformRequest} object for the id "_all" @@ -41,6 +38,9 @@ public class GetDataFrameTransformRequest implements Validatable { return new GetDataFrameTransformRequest("_all"); } + private final List ids; + private PageParams pageParams; + public GetDataFrameTransformRequest(String... ids) { this.ids = Arrays.asList(ids); } @@ -49,20 +49,12 @@ public class GetDataFrameTransformRequest implements Validatable { return ids; } - public Integer getFrom() { - return from; + public PageParams getPageParams() { + return pageParams; } - public void setFrom(Integer from) { - this.from = from; - } - - public Integer getSize() { - return size; - } - - public void setSize(Integer size) { - this.size = size; + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; } @Override @@ -78,7 +70,7 @@ public class GetDataFrameTransformRequest implements Validatable { @Override public int hashCode() { - return Objects.hash(ids); + return Objects.hash(ids, pageParams); } @Override @@ -91,6 +83,6 @@ public class GetDataFrameTransformRequest implements Validatable { return false; } GetDataFrameTransformRequest other = (GetDataFrameTransformRequest) obj; - return Objects.equals(ids, other.ids); + return Objects.equals(ids, other.ids) && Objects.equals(pageParams, other.pageParams); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java index e90c8a1e276..4a105f7b40c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java @@ -21,12 +21,14 @@ package org.elasticsearch.client.dataframe; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; +import org.elasticsearch.client.core.PageParams; import java.util.Objects; import java.util.Optional; public class 
GetDataFrameTransformStatsRequest implements Validatable { private final String id; + private PageParams pageParams; public GetDataFrameTransformStatsRequest(String id) { this.id = id; @@ -36,6 +38,14 @@ public class GetDataFrameTransformStatsRequest implements Validatable { return id; } + public PageParams getPageParams() { + return pageParams; + } + + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + @Override public Optional validate() { if (id == null) { @@ -49,7 +59,7 @@ public class GetDataFrameTransformStatsRequest implements Validatable { @Override public int hashCode() { - return Objects.hash(id); + return Objects.hash(id, pageParams); } @Override @@ -62,6 +72,6 @@ public class GetDataFrameTransformStatsRequest implements Validatable { return false; } GetDataFrameTransformStatsRequest other = (GetDataFrameTransformStatsRequest) obj; - return Objects.equals(id, other.id); + return Objects.equals(id, other.id) && Objects.equals(pageParams, other.pageParams); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java index 2adc2953c59..31b1fe48108 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java @@ -74,7 +74,4 @@ public class QueryConfig implements ToXContentObject { return Objects.equals(this.query, that.query); } - public boolean isValid() { - return this.query != null; - } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/AggregationConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/AggregationConfig.java index 2ebcf68d5ed..ceb5f1e1247 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/AggregationConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/AggregationConfig.java @@ -74,7 +74,4 @@ public class AggregationConfig implements ToXContentObject { return Objects.equals(this.aggregations, that.aggregations); } - public boolean isValid() { - return this.aggregations != null; - } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/GroupConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/GroupConfig.java index ef1dd0f64e3..950d35e4054 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/GroupConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/GroupConfig.java @@ -138,10 +138,6 @@ public class GroupConfig implements ToXContentObject { return groups; } - public boolean isValid() { - return this.groups != null; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java index 83b434d88ba..0c3a6e3ea89 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java 
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java @@ -97,10 +97,6 @@ public class PivotConfig implements ToXContentObject { return Objects.hash(groups, aggregationConfig); } - public boolean isValid() { - return groups.isValid() && aggregationConfig.isValid(); - } - public static Builder builder() { return new Builder(); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsRequest.java index 5b4438fa2a1..655ecb2b975 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetBucketsRequest.java @@ -20,9 +20,9 @@ package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.client.ml.job.results.Result; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarEventsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarEventsRequest.java index 5730e132df1..05bc234178e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarEventsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarEventsRequest.java @@ -21,9 +21,9 @@ package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.calendars.Calendar; import org.elasticsearch.client.ml.job.config.Job; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsRequest.java index 322efc19927..bcbae91693b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarsRequest.java @@ -21,13 +21,12 @@ package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.calendars.Calendar; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.client.ml.job.util.PageParams; - import java.io.IOException; import java.util.Objects; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesRequest.java index 4fc68793f00..b1000c3e4eb 
100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCategoriesRequest.java @@ -20,8 +20,8 @@ package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.job.config.Job; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetFiltersRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetFiltersRequest.java index 54bacdae108..a1808af23fc 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetFiltersRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetFiltersRequest.java @@ -20,8 +20,8 @@ package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.job.config.MlFilter; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetInfluencersRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetInfluencersRequest.java index cdcd40a41f8..8e5a45e7ba3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetInfluencersRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetInfluencersRequest.java @@ -20,8 +20,8 @@ package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.job.config.Job; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetModelSnapshotsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetModelSnapshotsRequest.java index 8743f3043e5..acb138ac442 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetModelSnapshotsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetModelSnapshotsRequest.java @@ -20,8 +20,8 @@ package org.elasticsearch.client.ml; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.job.config.Job; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsRequest.java index f70459ab9b1..7aaa80ca345 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetRecordsRequest.java @@ -19,8 +19,8 @@ package org.elasticsearch.client.ml; import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.job.config.Job; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java index 8c6b1c60458..7a1e5e23893 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java @@ -23,6 +23,7 @@ import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; @@ -43,7 +44,9 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.Collections; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; public class DataFrameRequestConvertersTests extends ESTestCase { @@ -147,6 +150,23 @@ public class DataFrameRequestConvertersTests extends ESTestCase { assertEquals(HttpGet.METHOD_NAME, request.getMethod()); assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo/_stats")); + + assertFalse(request.getParameters().containsKey("from")); + assertFalse(request.getParameters().containsKey("size")); + + getStatsRequest.setPageParams(new PageParams(0, null)); + request = DataFrameRequestConverters.getDataFrameTransformStats(getStatsRequest); + assertThat(request.getParameters(), hasEntry("from", "0")); + assertEquals(null, request.getParameters().get("size")); + + getStatsRequest.setPageParams(new PageParams(null, 50)); + request = DataFrameRequestConverters.getDataFrameTransformStats(getStatsRequest); + assertEquals(null, request.getParameters().get("from")); + assertThat(request.getParameters(), hasEntry("size", "50")); + + getStatsRequest.setPageParams(new PageParams(0, 10)); + request = DataFrameRequestConverters.getDataFrameTransformStats(getStatsRequest); + assertThat(request.getParameters(), allOf(hasEntry("from", "0"), hasEntry("size", "10"))); } public void testGetDataFrameTransform() { @@ -159,11 +179,19 @@ public class DataFrameRequestConvertersTests extends ESTestCase { assertFalse(request.getParameters().containsKey("from")); assertFalse(request.getParameters().containsKey("size")); - getRequest.setFrom(0); - getRequest.setSize(10); + getRequest.setPageParams(new PageParams(0, null)); 
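
For orientation while reading these hunks: callers that previously set `from` and `size` on the request individually now hand over a single `PageParams`, and either component may be `null`, in which case the corresponding query parameter is simply omitted. A short usage sketch of the API as changed in this patch (the transform id is invented):

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.core.PageParams;
import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest;
import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest;

public class PagingUsageSketch {
    public static void main(String[] args) {
        // One PageParams object replaces the removed setFrom(...)/setSize(...) pair.
        GetDataFrameTransformRequest getRequest = new GetDataFrameTransformRequest("my-transform");
        getRequest.setPageParams(new PageParams(0, 100));     // from=0, size=100

        // Stats requests gain the same paging support in this change.
        GetDataFrameTransformStatsRequest statsRequest = new GetDataFrameTransformStatsRequest("my-transform");
        statsRequest.setPageParams(new PageParams(null, 50)); // only "size" is sent
    }
}
--------------------------------------------------
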
request = DataFrameRequestConverters.getDataFrameTransform(getRequest); - assertEquals("0", request.getParameters().get("from")); - assertEquals("10", request.getParameters().get("size")); + assertThat(request.getParameters(), hasEntry("from", "0")); + assertEquals(null, request.getParameters().get("size")); + + getRequest.setPageParams(new PageParams(null, 50)); + request = DataFrameRequestConverters.getDataFrameTransform(getRequest); + assertEquals(null, request.getParameters().get("from")); + assertThat(request.getParameters(), hasEntry("size", "50")); + + getRequest.setPageParams(new PageParams(0, 10)); + request = DataFrameRequestConverters.getDataFrameTransform(getRequest); + assertThat(request.getParameters(), allOf(hasEntry("from", "0"), hasEntry("size", "10"))); } public void testGetDataFrameTransform_givenMulitpleIds() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index bb1924b8b49..f01db621bc2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.core.IndexerState; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; @@ -217,8 +218,7 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { assertThat(getResponse.getTransformConfigurations(), hasSize(2)); assertEquals(transform, getResponse.getTransformConfigurations().get(1)); - getRequest.setFrom(0); - getRequest.setSize(1); + getRequest.setPageParams(new PageParams(0,1)); getResponse = execute(getRequest, client::getDataFrameTransform, client::getDataFrameTransformAsync); assertNull(getResponse.getInvalidTransforms()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 11faaf87972..fd867a12204 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -23,6 +23,7 @@ import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.DeleteCalendarEventRequest; import org.elasticsearch.client.ml.DeleteCalendarJobRequest; @@ -82,7 +83,6 @@ import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.client.ml.job.config.JobUpdateTests; import org.elasticsearch.client.ml.job.config.MlFilter; import org.elasticsearch.client.ml.job.config.MlFilterTests; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; import 
org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java index 9b364975c77..092bc254f50 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.client; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; import org.elasticsearch.client.ml.GetCategoriesRequest; @@ -43,7 +44,6 @@ import org.elasticsearch.client.ml.job.results.AnomalyRecord; import org.elasticsearch.client.ml.job.results.Bucket; import org.elasticsearch.client.ml.job.results.Influencer; import org.elasticsearch.client.ml.job.results.OverallBucket; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.junit.After; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index f7b7b148f66..25e610d6781 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; @@ -112,7 +113,6 @@ import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.client.ml.job.config.MlFilter; import org.elasticsearch.client.ml.job.process.ModelSnapshot; import org.elasticsearch.client.ml.job.stats.JobStats; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 60dd2cb32ea..3c5059279b4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.core.IndexerState; +import org.elasticsearch.client.core.PageParams; import 
org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; @@ -554,7 +555,7 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest public void testGetDataFrameTransform() throws IOException, InterruptedException { createIndex("source-data"); - + QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); GroupConfig groupConfig = GroupConfig.builder().groupBy("reviewer", TermsGroupSource.builder().setField("user_id").build()).build(); @@ -585,8 +586,7 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // end::get-data-frame-transform-request // tag::get-data-frame-transform-request-options - request.setFrom(0); // <1> - request.setSize(100); // <2> + request.setPageParams(new PageParams(0, 100)); // <1> // end::get-data-frame-transform-request-options // tag::get-data-frame-transform-execute diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index f4e3f86196f..fe7d04a4e0a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.client.MachineLearningIT; import org.elasticsearch.client.MlTestStateCleaner; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; @@ -137,7 +138,6 @@ import org.elasticsearch.client.ml.job.results.CategoryDefinition; import org.elasticsearch.client.ml.job.results.Influencer; import org.elasticsearch.client.ml.job.results.OverallBucket; import org.elasticsearch.client.ml.job.stats.JobStats; -import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsRequestTests.java index d6379886912..d8012247dc5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetBucketsRequestTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.client.ml; -import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarEventsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarEventsRequestTests.java index a85eda1ac74..8bc9d808ee3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarEventsRequestTests.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarEventsRequestTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.client.ml; -import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsRequestTests.java index b7ca44fd5fa..4ec6c72b83d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarsRequestTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.client.ml; -import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesRequestTests.java index 7d9fe2b238f..bcc697fdb20 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCategoriesRequestTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.client.ml; -import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetInfluencersRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetInfluencersRequestTests.java index 94937cd7815..5006a5f7d33 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetInfluencersRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetInfluencersRequestTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.client.ml; -import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetModelSnapshotsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetModelSnapshotsRequestTests.java index 80a08f8c765..a15a80ac3d2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetModelSnapshotsRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetModelSnapshotsRequestTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.client.ml; -import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsRequestTests.java index f6f4b49889a..9b1c8699f4c 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetRecordsRequestTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.client.ml; -import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/util/PageParamsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/util/PageParamsTests.java index f74cedf1437..e099e03f1dd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/util/PageParamsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/util/PageParamsTests.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.client.ml.util; -import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.client.core.PageParams; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index c7c58ad4ebd..14a7e566587 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -61,7 +61,7 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla } if (jdk) { into('jdk') { - with jdkFiles(platform) + with jdkFiles(project, platform) } } into('') { @@ -295,6 +295,10 @@ subprojects { } } +subprojects { + group = "org.elasticsearch.distribution.${name.startsWith("oss-") ? "oss" : "default"}" +} + /***************************************************************************** * Rest test config * *****************************************************************************/ @@ -302,6 +306,8 @@ configure(subprojects.findAll { it.name == 'integ-test-zip' }) { apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' + group = "org.elasticsearch.distribution.integ-test-zip" + integTest { includePackaged = true } @@ -321,23 +327,14 @@ configure(subprojects.findAll { it.name == 'integ-test-zip' }) { inputs.properties(project(':distribution').restTestExpansions) MavenFilteringHack.filter(it, project(':distribution').restTestExpansions) } -} -/***************************************************************************** - * Maven config * - *****************************************************************************/ -configure(subprojects.findAll { it.name.contains('zip') }) { - // only zip distributions go to maven + + // The integ-test-distribution is published to maven BuildPlugin.configurePomGeneration(project) apply plugin: 'nebula.info-scm' apply plugin: 'nebula.maven-base-publish' apply plugin: 'nebula.maven-scm' - // note: the group must be correct before applying the nexus plugin, or - // it will capture the wrong value... - String subgroup = project.name == 'integ-test-zip' ? 'integ-test-zip' : 'zip' - project.group = "org.elasticsearch.distribution.${subgroup}" - // make the pom file name use elasticsearch instead of the project name archivesBaseName = "elasticsearch${it.name.contains('oss') ? 
'-oss' : ''}" @@ -378,3 +375,4 @@ configure(subprojects.findAll { it.name.contains('zip') }) { } } } + diff --git a/distribution/build.gradle b/distribution/build.gradle index c1818bf7455..a92b157eaba 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -17,18 +17,16 @@ * under the License. */ + +import org.apache.tools.ant.filters.FixCrLfFilter import org.elasticsearch.gradle.ConcatFilesTask import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.NoticeTask import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.test.RunTask -import org.apache.tools.ant.filters.FixCrLfFilter import java.nio.file.Files -import java.nio.file.Path -import java.util.regex.Matcher -import java.util.regex.Pattern - +import java.nio.file.Path /***************************************************************************** * Third party dependencies report * *****************************************************************************/ @@ -219,64 +217,6 @@ xpack.subprojects.findAll { it.parent == xpack }.each { Project xpackModule -> copyLog4jProperties(buildDefaultLog4jConfig, xpackModule) } -/***************************************************************************** - * JDKs * - *****************************************************************************/ -// extract the bundled jdk version, broken into elements as: [feature, interim, update, build] -// Note the "patch" version is not yet handled here, as it has not yet been used by java. -Pattern JDK_VERSION = Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+)@([a-f0-9]{32})?") -Matcher jdkVersionMatcher = JDK_VERSION.matcher(VersionProperties.bundledJdk) -if (jdkVersionMatcher.matches() == false) { - throw new IllegalArgumentException("Malformed jdk version [" + VersionProperties.bundledJdk + "]") -} -String jdkVersion = jdkVersionMatcher.group(1) + (jdkVersionMatcher.group(2) != null ? (jdkVersionMatcher.group(2)) : "") -String jdkMajor = jdkVersionMatcher.group(1) -String jdkBuild = jdkVersionMatcher.group(3) -String hash = jdkVersionMatcher.group(4) - -repositories { - // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back - ivy { - url "https://download.oracle.com" - patternLayout { - artifact "java/GA/jdk${jdkMajor}/${jdkBuild}/GPL/openjdk-[revision]_[module]-x64_bin.[ext]" - } - } - // current pattern since 12.0.1 - ivy { - url "https://download.oracle.com" - patternLayout { - artifact "java/GA/jdk${jdkVersion}/${hash}/${jdkBuild}/GPL/openjdk-[revision]_[module]-x64_bin.[ext]" - } - } -} -for (String platform : ['linux', 'darwin', 'windows']) { - String jdkConfigName = "jdk_${platform}" - Configuration jdkConfig = configurations.create(jdkConfigName) - String extension = platform.equals('windows') ? 'zip' : 'tar.gz' - dependencies.add(jdkConfigName, "jdk:${platform.equals('darwin') ? 'osx' : platform}:${jdkVersion}@${extension}") - - int rootNdx = platform.equals('darwin') ? 
2 : 1 - Closure removeRootDir = { - it.eachFile { FileCopyDetails details -> - details.relativePath = new RelativePath(true, details.relativePath.segments[rootNdx..-1] as String[]) - } - it.includeEmptyDirs false - } - String extractDir = "${buildDir}/jdks/openjdk-${jdkVersion}_${platform}" - project.task("extract${platform.capitalize()}Jdk", type: Copy) { - doFirst { - project.delete(extractDir) - } - into extractDir - if (extension.equals('zip')) { - from({ zipTree(jdkConfig.singleFile) }, removeRootDir) - } else { - from({ tarTree(resources.gzip(jdkConfig.singleFile)) }, removeRootDir) - } - } -} - // make sure we have a clean task since we aren't a java project, but we have tasks that // put stuff in the build dir task clean(type: Delete) { @@ -284,6 +224,9 @@ task clean(type: Delete) { } configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { + + apply plugin: 'elasticsearch.jdk-download' + // TODO: the map needs to be an input of the tasks, so that when it changes, the task will re-run... /***************************************************************************** * Properties to expand when copying packaging files * @@ -422,9 +365,15 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } - jdkFiles = { platform -> - copySpec { - from project(':distribution').tasks.getByName("extract${platform.capitalize()}Jdk") + jdkFiles = { project, platform -> + project.jdks { + "bundled_${platform}" { + it.platform = platform + it.version = VersionProperties.bundledJdk + } + } + return copySpec { + from project.jdks."bundled_${platform}" eachFile { FileCopyDetails details -> if (details.relativePath.segments[-2] == 'bin' || details.relativePath.segments[-1] == 'jspawnhelper') { details.mode = 0755 diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index 56ad936bdfe..ab55c737783 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -17,34 +17,17 @@ dependencies { ossDockerSource project(path: ":distribution:archives:oss-linux-tar") } -ext.expansions = { oss -> +ext.expansions = { oss, local -> final String classifier = 'linux-x86_64' final String elasticsearch = oss ? "elasticsearch-oss-${VersionProperties.elasticsearch}-${classifier}.tar.gz" : "elasticsearch-${VersionProperties.elasticsearch}-${classifier}.tar.gz" return [ 'elasticsearch' : elasticsearch, 'license' : oss ? 'Apache-2.0' : 'Elastic License', - 'source_elasticsearch': local() ? "COPY $elasticsearch /opt/" : "RUN cd /opt && curl --retry 8 -s -L -O https://artifacts.elastic.co/downloads/elasticsearch/${elasticsearch} && cd -", + 'source_elasticsearch': local ? "COPY $elasticsearch /opt/" : "RUN cd /opt && curl --retry 8 -s -L -O https://artifacts.elastic.co/downloads/elasticsearch/${elasticsearch} && cd -", 'version' : VersionProperties.elasticsearch ] } -/* - * We need to be able to render a Dockerfile that references the official artifacts on https://artifacts.elastic.co. For this, we use a - * substitution in the Dockerfile template where we can either replace source_elasticsearch with a COPY from the Docker build context, or - * a RUN curl command to retrieve the artifact from https://artifacts.elastic.co. The system property build.docker.source, which can be - * either "local" (default) or "remote" controls which version of the Dockerfile is produced. 
- */ -private static boolean local() { - final String buildDockerSource = System.getProperty("build.docker.source") - if (buildDockerSource == null || "local".equals(buildDockerSource)) { - return true - } else if ("remote".equals(buildDockerSource)) { - return false - } else { - throw new IllegalArgumentException("expected build.docker.source to be [local] or [remote] but was [" + buildDockerSource + "]") - } -} - private static String files(final boolean oss) { return "build/${ oss ? 'oss-' : ''}docker" } @@ -53,39 +36,38 @@ private static String taskName(final String prefix, final boolean oss, final Str return "${prefix}${oss ? 'Oss' : ''}${suffix}" } -void addCopyDockerContextTask(final boolean oss) { - task(taskName("copy", oss, "DockerContext"), type: Sync) { - into files(oss) - - into('bin') { - from 'src/docker/bin' - } - - into('config') { - from 'src/docker/config' - } - - if (local()) { - if (oss) { - from configurations.ossDockerSource - } else { - from configurations.dockerSource +project.ext { + dockerBuildContext = { boolean oss, boolean local -> + copySpec { + into('bin') { + from project.projectDir.toPath().resolve("src/docker/bin") } - from configurations.dockerPlugins + into('config') { + from project.projectDir.toPath().resolve("src/docker/config") + } + + from(project.projectDir.toPath().resolve("src/docker/Dockerfile")) { + MavenFilteringHack.filter(it, expansions(oss, local)) + } } } } -void addCopyDockerfileTask(final boolean oss) { - task(taskName("copy", oss, "Dockerfile"), type: Copy) { - dependsOn taskName("copy", oss, "DockerContext") - inputs.properties(expansions(oss)) // ensure task is run when ext.expansions is changed +void addCopyDockerContextTask(final boolean oss) { + task(taskName("copy", oss, "DockerContext"), type: Sync) { + inputs.properties(expansions(oss, true)) into files(oss) - from('src/docker/Dockerfile') { - MavenFilteringHack.filter(it, expansions(oss)) + with dockerBuildContext(oss, true) + + if (oss) { + from configurations.ossDockerSource + } else { + from configurations.dockerSource } + + from configurations.dockerPlugins } } @@ -104,7 +86,6 @@ check.dependsOn postProcessFixture void addBuildDockerImage(final boolean oss) { final Task buildDockerImageTask = task(taskName("build", oss, "DockerImage"), type: LoggedExec) { dependsOn taskName("copy", oss, "DockerContext") - dependsOn taskName("copy", oss, "Dockerfile") List tags if (oss) { tags = [ @@ -132,7 +113,6 @@ void addBuildDockerImage(final boolean oss) { for (final boolean oss : [false, true]) { addCopyDockerContextTask(oss) - addCopyDockerfileTask(oss) addBuildDockerImage(oss) } diff --git a/distribution/docker/docker-build-context/build.gradle b/distribution/docker/docker-build-context/build.gradle new file mode 100644 index 00000000000..254407093ce --- /dev/null +++ b/distribution/docker/docker-build-context/build.gradle @@ -0,0 +1,11 @@ +apply plugin: 'base' + +task buildDockerBuildContext(type: Tar) { + extension = 'tar.gz' + compression = Compression.GZIP + archiveClassifier = "docker-build-context" + archiveBaseName = "elasticsearch" + with dockerBuildContext(false, false) +} + +assemble.dependsOn buildDockerBuildContext diff --git a/distribution/docker/oss-docker-build-context/build.gradle b/distribution/docker/oss-docker-build-context/build.gradle new file mode 100644 index 00000000000..248b260daa9 --- /dev/null +++ b/distribution/docker/oss-docker-build-context/build.gradle @@ -0,0 +1,11 @@ +apply plugin: 'base' + +task buildOssDockerBuildContext(type: Tar) { + extension = 
'tar.gz' + compression = Compression.GZIP + archiveClassifier = "docker-build-context" + archiveBaseName = "elasticsearch-oss" + with dockerBuildContext(true, false) +} + +assemble.dependsOn buildOssDockerBuildContext diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index af1479d360c..72804c7a907 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -53,6 +53,7 @@ import java.util.regex.Pattern buildscript { repositories { maven { + name "gradle-plugins" url "https://plugins.gradle.org/m2/" } } @@ -142,7 +143,7 @@ Closure commonPackageConfig(String type, boolean oss, boolean jdk) { } if (jdk) { into('jdk') { - with jdkFiles('linux') + with jdkFiles(project, 'linux') } } // we need to specify every intermediate directory in these paths so the package managers know they are explicitly diff --git a/distribution/packages/src/common/scripts/postinst b/distribution/packages/src/common/scripts/postinst index 0a0b505b12b..b440bb80775 100644 --- a/distribution/packages/src/common/scripts/postinst +++ b/distribution/packages/src/common/scripts/postinst @@ -94,11 +94,15 @@ elif [ "$RESTART_ON_UPGRADE" = "true" ]; then fi # the equivalent code for rpm is in posttrans -if [ "$PACKAGE" = "deb" -a ! -f /etc/elasticsearch/elasticsearch.keystore ]; then - /usr/share/elasticsearch/bin/elasticsearch-keystore create - chown root:elasticsearch /etc/elasticsearch/elasticsearch.keystore - chmod 660 /etc/elasticsearch/elasticsearch.keystore - md5sum /etc/elasticsearch/elasticsearch.keystore > /etc/elasticsearch/.elasticsearch.keystore.initial_md5sum +if [ "$PACKAGE" = "deb" ]; then + if [ ! -f /etc/elasticsearch/elasticsearch.keystore ]; then + /usr/share/elasticsearch/bin/elasticsearch-keystore create + chown root:elasticsearch /etc/elasticsearch/elasticsearch.keystore + chmod 660 /etc/elasticsearch/elasticsearch.keystore + md5sum /etc/elasticsearch/elasticsearch.keystore > /etc/elasticsearch/.elasticsearch.keystore.initial_md5sum + else + /usr/share/elasticsearch/bin/elasticsearch-keystore upgrade + fi fi ${scripts.footer} diff --git a/distribution/packages/src/common/scripts/posttrans b/distribution/packages/src/common/scripts/posttrans index d3550bdbed2..fdb9aafba38 100644 --- a/distribution/packages/src/common/scripts/posttrans +++ b/distribution/packages/src/common/scripts/posttrans @@ -3,6 +3,8 @@ if [ ! -f /etc/elasticsearch/elasticsearch.keystore ]; then chown root:elasticsearch /etc/elasticsearch/elasticsearch.keystore chmod 660 /etc/elasticsearch/elasticsearch.keystore md5sum /etc/elasticsearch/elasticsearch.keystore > /etc/elasticsearch/.elasticsearch.keystore.initial_md5sum +else + /usr/share/elasticsearch/bin/elasticsearch-keystore upgrade fi ${scripts.footer} diff --git a/docs/build.gradle b/docs/build.gradle index 5b98a62d996..5816546d7e2 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -56,9 +56,6 @@ integTestCluster { extraConfigFile 'hunspell/en_US/en_US.dic', '../server/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic' // Whitelist reindexing from the local node so we can test it. 
setting 'reindex.remote.whitelist', '127.0.0.1:*' - - // TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults - systemProperty 'es.scripting.update.ctx_in_params', 'false' } // build the cluster with all plugins diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc index d85ad8350e9..4a7fd7482d2 100644 --- a/docs/java-api/index.asciidoc +++ b/docs/java-api/index.asciidoc @@ -57,7 +57,7 @@ For Maven: elastic-lucene-snapshots Elastic Lucene Snapshots - http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/00142c9 + https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/00142c9 true false @@ -68,7 +68,8 @@ For Gradle: ["source","groovy",subs="attributes"] -------------------------------------------------- maven { - url 'http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/00142c9' + name "lucene-snapshots" + url 'https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/00142c9' } -------------------------------------------------- diff --git a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc index 41fa841060b..ec2253b2c25 100644 --- a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc +++ b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc @@ -13,7 +13,7 @@ The API accepts a +{request}+ object and returns a +{response}+. ==== Get Data Frame Request A +{request}+ requires either a data frame transform id, a comma separated list of ids or -the special wildcard `_all` to get all {dataframe-transform}s +the special wildcard `_all` to get all {dataframe-transforms} ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -29,8 +29,10 @@ The following arguments are optional. -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request-options] -------------------------------------------------- -<1> Page {dataframe-transform}s starting from this value -<2> Return at most `size` {dataframe-transform}s +<1> The page parameters `from` and `size`. `from` specifies the number of +{dataframe-transforms} to skip. `size` specifies the maximum number of +{dataframe-transforms} to get. Defaults to `0` and `100` respectively. 
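Editor's note: the `from`/`size` paging just described maps to a pair of request parameters in the Java high-level REST client. A minimal Groovy sketch follows; the `GetDataFrameTransformRequest` and `PageParams` class and package names are assumptions inferred from the surrounding docs, not verified signatures.

[source,groovy]
----
// Hedged sketch of the from/size paging described above. Class and package
// names (GetDataFrameTransformRequest, PageParams) are assumptions.
import org.elasticsearch.client.core.PageParams
import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest

def request = new GetDataFrameTransformRequest('_all') // an id, comma-separated ids, or _all
request.setPageParams(new PageParams(0, 100))          // from = 0, size = 100 (the documented defaults)
----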
+ include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/getting-started.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc index de337f70840..89912cc2a45 100644 --- a/docs/java-rest/high-level/getting-started.asciidoc +++ b/docs/java-rest/high-level/getting-started.asciidoc @@ -93,7 +93,7 @@ For Maven: elastic-lucene-snapshots Elastic Lucene Snapshots - http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/83f9835 + https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/83f9835 true false @@ -104,7 +104,8 @@ For Gradle: ["source","groovy",subs="attributes"] -------------------------------------------------- maven { - url 'http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/83f9835' + name 'lucene-snapshots' + url 'https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/83f9835' } -------------------------------------------------- diff --git a/docs/painless/painless-api-reference/index.asciidoc b/docs/painless/painless-api-reference/index.asciidoc index 88130f7fdfc..65290453322 100644 --- a/docs/painless/painless-api-reference/index.asciidoc +++ b/docs/painless/painless-api-reference/index.asciidoc @@ -10,7 +10,7 @@ |Aggs Reduce | <> | |Analysis | <> | <> |Bucket Aggregation | <> | -|Field | <> | +|Field | <> | <> |Filter | <> | |Ingest | <> | <> |Interval | <> | @@ -33,6 +33,7 @@ include::painless-api-reference-shared/index.asciidoc[] include::painless-api-reference-analysis/index.asciidoc[] +include::painless-api-reference-field/index.asciidoc[] include::painless-api-reference-ingest/index.asciidoc[] include::painless-api-reference-moving-function/index.asciidoc[] include::painless-api-reference-score/index.asciidoc[] diff --git a/docs/painless/painless-api-reference/painless-api-reference-analysis/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-analysis/index.asciidoc index 8dc729b31ea..d09af700a2f 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-analysis/index.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-analysis/index.asciidoc @@ -7,6 +7,10 @@ The following specialized API is available in the Analysis context. * See the <> for further API available in all contexts. +==== Classes By Package +The following classes are available grouped by their respective packages. Click on a class to view details about the available methods and fields. + + ==== org.elasticsearch.analysis.common <> diff --git a/docs/painless/painless-api-reference/painless-api-reference-analysis/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-analysis/packages.asciidoc index 106f9272df4..ff272cb228f 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-analysis/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-analysis/packages.asciidoc @@ -3,7 +3,7 @@ [role="exclude",id="painless-api-reference-analysis-org-elasticsearch-analysis-common"] === Analysis API for package org.elasticsearch.analysis.common -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. 
[[painless-api-reference-analysis-AnalysisPredicateScript-Token]] ==== AnalysisPredicateScript.Token diff --git a/docs/painless/painless-api-reference/painless-api-reference-field/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-field/index.asciidoc new file mode 100644 index 00000000000..eb71e71ccf1 --- /dev/null +++ b/docs/painless/painless-api-reference/painless-api-reference-field/index.asciidoc @@ -0,0 +1,17 @@ +// This file is auto-generated. Do not edit. + +[[painless-api-reference-field]] +=== Field API + +The following specialized API is available in the Field context. + +* See the <> for further API available in all contexts. + +==== Static Methods +The following methods are directly callable without a class/instance qualifier. Note parameters denoted by a (*) are treated as read-only values. + +* List domainSplit(String) +* List domainSplit(String, Map) + +include::packages.asciidoc[] + diff --git a/docs/painless/painless-api-reference/painless-api-reference-field/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-field/packages.asciidoc new file mode 100644 index 00000000000..282fcf136a6 --- /dev/null +++ b/docs/painless/painless-api-reference/painless-api-reference-field/packages.asciidoc @@ -0,0 +1,3 @@ +// This file is auto-generated. Do not edit. + + diff --git a/docs/painless/painless-api-reference/painless-api-reference-ingest/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-ingest/index.asciidoc index e4067c24dce..ff70233defb 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-ingest/index.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-ingest/index.asciidoc @@ -7,6 +7,10 @@ The following specialized API is available in the Ingest context. * See the <> for further API available in all contexts. +==== Classes By Package +The following classes are available grouped by their respective packages. Click on a class to view details about the available methods and fields. + + ==== org.elasticsearch.ingest.common <> diff --git a/docs/painless/painless-api-reference/painless-api-reference-ingest/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-ingest/packages.asciidoc index b6a48ee7d5d..a4a5a4529cc 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-ingest/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-ingest/packages.asciidoc @@ -3,7 +3,7 @@ [role="exclude",id="painless-api-reference-ingest-org-elasticsearch-ingest-common"] === Ingest API for package org.elasticsearch.ingest.common -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-ingest-Processors]] ==== Processors diff --git a/docs/painless/painless-api-reference/painless-api-reference-moving-function/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-moving-function/index.asciidoc index 9d37e81a94f..93a88519d65 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-moving-function/index.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-moving-function/index.asciidoc @@ -7,6 +7,10 @@ The following specialized API is available in the Moving Function context. * See the <> for further API available in all contexts. +==== Classes By Package +The following classes are available grouped by their respective packages. 
Click on a class to view details about the available methods and fields. + + ==== org.elasticsearch.search.aggregations.pipeline <> diff --git a/docs/painless/painless-api-reference/painless-api-reference-moving-function/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-moving-function/packages.asciidoc index 824aa23f7eb..bdd8b1fd73c 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-moving-function/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-moving-function/packages.asciidoc @@ -3,7 +3,7 @@ [role="exclude",id="painless-api-reference-moving-function-org-elasticsearch-search-aggregations-pipeline"] === Moving Function API for package org.elasticsearch.search.aggregations.pipeline -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-moving-function-MovingFunctions]] ==== MovingFunctions diff --git a/docs/painless/painless-api-reference/painless-api-reference-score/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-score/index.asciidoc index fe9e0e1d235..d355a495e06 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-score/index.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-score/index.asciidoc @@ -7,6 +7,31 @@ The following specialized API is available in the Score context. * See the <> for further API available in all contexts. +==== Static Methods +The following methods are directly callable without a class/instance qualifier. Note parameters denoted by a (*) are treated as read-only values. + +* double cosineSimilarity(List *, VectorScriptDocValues.DenseVectorScriptDocValues) +* double cosineSimilaritySparse(Map *, VectorScriptDocValues.SparseVectorScriptDocValues) +* double decayDateExp(String *, String *, String *, double *, JodaCompatibleZonedDateTime) +* double decayDateGauss(String *, String *, String *, double *, JodaCompatibleZonedDateTime) +* double decayDateLinear(String *, String *, String *, double *, JodaCompatibleZonedDateTime) +* double decayGeoExp(String *, String *, String *, double *, GeoPoint) +* double decayGeoGauss(String *, String *, String *, double *, GeoPoint) +* double decayGeoLinear(String *, String *, String *, double *, GeoPoint) +* double decayNumericExp(double *, double *, double *, double *, double) +* double decayNumericGauss(double *, double *, double *, double *, double) +* double decayNumericLinear(double *, double *, double *, double *, double) +* double dotProduct(List, VectorScriptDocValues.DenseVectorScriptDocValues) +* double dotProductSparse(Map *, VectorScriptDocValues.SparseVectorScriptDocValues) +* double randomScore(int *) +* double randomScore(int *, String *) +* double saturation(double, double) +* double sigmoid(double, double, double) + +==== Classes By Package +The following classes are available grouped by their respective packages. Click on a class to view details about the available methods and fields. 
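Editor's note: to make two of the simpler Score-context helpers above concrete, here is a hedged Groovy sketch of `saturation` and `sigmoid`. The formulas (`v / (v + k)` and `v^a / (v^a + k^a)`) are an assumption based on the standard saturation/sigmoid definitions, not taken from the generated reference.

[source,groovy]
----
// Hedged illustration of saturation(double, double) and
// sigmoid(double, double, double); the formulas are assumed, not ES source.
double saturation(double value, double k) {
    value / (value + k)        // 0.5 when value == k, approaches 1 as value grows
}

double sigmoid(double value, double k, double a) {
    double va = Math.pow(value, a)
    va / (va + Math.pow(k, a)) // steeper transition around k for larger a
}

assert saturation(3, 3) == 0.5d
assert sigmoid(3, 3, 2) == 0.5d
----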
+ + ==== org.elasticsearch.index.query <> diff --git a/docs/painless/painless-api-reference/painless-api-reference-score/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-score/packages.asciidoc index 287f7a223ca..10f0f1b6dae 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-score/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-score/packages.asciidoc @@ -3,7 +3,7 @@ [role="exclude",id="painless-api-reference-score-org-elasticsearch-index-query"] === Score API for package org.elasticsearch.index.query -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-score-VectorScriptDocValues]] ==== VectorScriptDocValues diff --git a/docs/painless/painless-api-reference/painless-api-reference-shared/index.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-shared/index.asciidoc index c349602a7b5..d5452ce8fab 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-shared/index.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-shared/index.asciidoc @@ -5,6 +5,10 @@ The following API is available in all contexts. +==== Classes By Package +The following classes are available grouped by their respective packages. Click on a class to view details about the available methods and fields. + + ==== java.lang <> diff --git a/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc b/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc index ed6e10e7b19..f6921410512 100644 --- a/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc +++ b/docs/painless/painless-api-reference/painless-api-reference-shared/packages.asciidoc @@ -3,7 +3,7 @@ [role="exclude",id="painless-api-reference-shared-java-lang"] === Shared API for package java.lang -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-Appendable]] ==== Appendable @@ -1399,7 +1399,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-math"] === Shared API for package java.math -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-BigDecimal]] ==== BigDecimal @@ -1557,7 +1557,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-text"] === Shared API for package java.text -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-Annotation]] ==== Annotation @@ -2265,7 +2265,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-time"] === Shared API for package java.time -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-Clock]] ==== Clock @@ -3078,7 +3078,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-time-chrono"] === Shared API for package java.time.chrono -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. 
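Editor's note: the vector helpers referenced above (`dotProduct`, `cosineSimilarity`) reduce to standard linear algebra. A hedged Groovy sketch of the math on plain lists follows; real scripts receive `VectorScriptDocValues` rather than lists, which this sketch glosses over.

[source,groovy]
----
// Hedged sketch of the dense-vector math behind dotProduct and cosineSimilarity.
double dot(List<Double> a, List<Double> b) {
    double sum = 0
    for (int i = 0; i < a.size(); i++) {
        sum += a[i] * b[i]
    }
    sum
}

double cosineSimilarity(List<Double> query, List<Double> doc) {
    dot(query, doc) / (Math.sqrt(dot(query, query)) * Math.sqrt(dot(doc, doc)))
}

assert Math.abs(cosineSimilarity([1d, 0d], [1d, 0d]) - 1d) < 1e-9
----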
[[painless-api-reference-shared-AbstractChronology]] ==== AbstractChronology @@ -3675,7 +3675,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-time-format"] === Shared API for package java.time.format -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-DateTimeFormatter]] ==== DateTimeFormatter @@ -3874,7 +3874,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-time-temporal"] === Shared API for package java.time.temporal -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-ChronoField]] ==== ChronoField @@ -4166,7 +4166,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-time-zone"] === Shared API for package java.time.zone -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-ZoneOffsetTransition]] ==== ZoneOffsetTransition @@ -4265,7 +4265,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-util"] === Shared API for package java.util -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-AbstractCollection]] ==== AbstractCollection @@ -7194,7 +7194,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-util-function"] === Shared API for package java.util.function -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-BiConsumer]] ==== BiConsumer @@ -7582,7 +7582,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-util-regex"] === Shared API for package java.util.regex -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-Matcher]] ==== Matcher @@ -7635,7 +7635,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-java-util-stream"] === Shared API for package java.util.stream -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-BaseStream]] ==== BaseStream @@ -7957,7 +7957,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-apache-lucene-util"] === Shared API for package org.apache.lucene.util -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-BytesRef]] ==== BytesRef @@ -7974,7 +7974,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-common-geo"] === Shared API for package org.elasticsearch.common.geo -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. 
[[painless-api-reference-shared-GeoPoint]] ==== GeoPoint @@ -7987,7 +7987,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-index-fielddata"] === Shared API for package org.elasticsearch.index.fielddata -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-ScriptDocValues-Booleans]] ==== ScriptDocValues.Booleans @@ -8386,7 +8386,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-index-mapper"] === Shared API for package org.elasticsearch.index.mapper -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-IpFieldMapper-IpFieldType-IpScriptDocValues]] ==== IpFieldMapper.IpFieldType.IpScriptDocValues @@ -8445,7 +8445,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-index-query"] === Shared API for package org.elasticsearch.index.query -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-IntervalFilterScript-Interval]] ==== IntervalFilterScript.Interval @@ -8459,7 +8459,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-index-similarity"] === Shared API for package org.elasticsearch.index.similarity -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-ScriptedSimilarity-Doc]] ==== ScriptedSimilarity.Doc @@ -8499,7 +8499,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-painless-api"] === Shared API for package org.elasticsearch.painless.api -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-Debug]] ==== Debug @@ -8511,7 +8511,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-script"] === Shared API for package org.elasticsearch.script -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. [[painless-api-reference-shared-JodaCompatibleZonedDateTime]] ==== JodaCompatibleZonedDateTime @@ -8594,7 +8594,7 @@ See the <> for a high-level overview [role="exclude",id="painless-api-reference-shared-org-elasticsearch-search-lookup"] === Shared API for package org.elasticsearch.search.lookup -See the <> for a high-level overview of all packages. +See the <> for a high-level overview of all packages and classes. 
[[painless-api-reference-shared-FieldLookup]] ==== FieldLookup diff --git a/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc index a92e2476ad7..50462cc2871 100644 --- a/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc @@ -116,9 +116,8 @@ And it'd respond: duplicate of this token it has been removed from the token stream NOTE: The synonym and synonym_graph filters use their preceding analysis chain to -parse and analyse their synonym lists, and ignore any token filters in the chain -that produce multiple tokens at the same position. This means that any filters -within the multiplexer will be ignored for the purpose of synonyms. If you want to -use filters contained within the multiplexer for parsing synonyms (for example, to -apply stemming to the synonym lists), then you should append the synonym filter -to the relevant multiplexer filter list. +parse and analyse their synonym lists, and will throw an exception if that chain +contains token filters that produce multiple tokens at the same position. +If you want to apply synonyms to a token stream containing a multiplexer, then you +should append the synonym filter to each relevant multiplexer filter list, rather than +placing it after the multiplexer in the main token chain definition. diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc index 2285c6f6e89..b434129626d 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -188,6 +188,10 @@ parsing synonyms, e.g. `asciifolding` will only produce the folded version of th token. Others, e.g. `multiplexer`, `word_delimiter_graph` or `ngram` will throw an error. +If you need to build analyzers that include both multi-token filters and synonym +filters, consider using the <> filter, +with the multi-token filters in one branch and the synonym filter in the other. + WARNING: The synonym rules should not contain words that are removed by a filter that appears after in the chain (a `stop` filter for instance). Removing a term from a synonym rule breaks the matching at query time. diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc index 11072361946..139f7c3ab0a 100644 --- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc @@ -177,3 +177,7 @@ multiple versions of a token may choose which version of the token to emit when parsing synonyms, e.g. `asciifolding` will only produce the folded version of the token. Others, e.g. `multiplexer`, `word_delimiter_graph` or `ngram` will throw an error. + +If you need to build analyzers that include both multi-token filters and synonym +filters, consider using the <> filter, +with the multi-token filters in one branch and the synonym filter in the other. 
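Editor's note: to ground the multiplexer/synonym advice above, here is a hedged sketch of index settings in which the synonym filter is appended to a multiplexer branch rather than placed after the multiplexer in the analyzer chain. All filter and analyzer names are invented for illustration.

[source,groovy]
----
// Hedged sketch (invented names): the synonym filter lives inside the
// multiplexer branch that should see it, not after the multiplexer.
import groovy.json.JsonOutput

def settings = [
    analysis: [
        filter  : [
            my_synonyms   : [type: 'synonym', synonyms: ['quick, fast']],
            my_multiplexer: [
                type   : 'multiplexer',
                // each entry is a comma-separated filter chain for one branch
                filters: ['lowercase', 'lowercase, my_synonyms']
            ]
        ],
        analyzer: [
            my_analyzer: [tokenizer: 'standard', filter: ['my_multiplexer']]
        ]
    ]
]
println JsonOutput.prettyPrint(JsonOutput.toJson(settings))
----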
diff --git a/docs/reference/data-frames/apis/preview-transform.asciidoc b/docs/reference/data-frames/apis/preview-transform.asciidoc index cfa1763df08..cf3f3cb96f9 100644 --- a/docs/reference/data-frames/apis/preview-transform.asciidoc +++ b/docs/reference/data-frames/apis/preview-transform.asciidoc @@ -36,7 +36,9 @@ eCommerce sample data: -------------------------------------------------- POST _data_frame/transforms/_preview { - "source": "kibana_sample_data_ecommerce", + "source": { + "index": "kibana_sample_data_ecommerce" + }, "pivot": { "group_by": { "customer_id": { diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 6087a1ba3b3..e106c2b16ee 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -287,4 +287,4 @@ See <>. [float] [[bulk-partial-responses]] === Partial responses -To ensure fast responses, the multi search API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file +To ensure fast responses, the bulk API will respond with partial results if one or more shards fail. See <> for more information. \ No newline at end of file diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index a55fd98d91e..c5109ab9c9f 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -198,14 +198,37 @@ PUT my_index PUT my_index/_doc/1 { "name": { - "first": "Alice", - "middle": "Mary", - "last": "White" + "first": "John", + "middle": "Winston", + "last": "Lennon" } } -------------------------------------------------- // CONSOLE +Note that the `path_match` and `path_unmatch` parameters match on object paths +in addition to leaf fields. As an example, indexing the following document will +result in an error because the `path_match` setting also matches the object +field `name.title`, which can't be mapped as text: + +[source,js] +-------------------------------------------------- +PUT my_index/_doc/2 +{ + "name": { + "first": "Paul", + "last": "McCartney", + "title": { + "value": "Sir", + "category": "order of chivalry" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] +// TEST[catch:bad_request] + [[template-variables]] ==== `{name}` and `{dynamic_type}` diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index 4d26f30ada2..47952d61c49 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -270,7 +270,8 @@ Elasticsearch 7.x:: * Specifying types in requests is deprecated. For instance, indexing a document no longer requires a document `type`. The new index APIs are `PUT {index}/_doc/{id}` in case of explicit ids and `POST {index}/_doc` - for auto-generated ids. + for auto-generated ids. Note that in 7.0, `_doc` is a permanent part of the + path, and represents the endpoint name rather than the document type. * The `include_type_name` parameter in the index creation, index template, and mapping APIs will default to `false`. Setting the parameter at all will @@ -554,6 +555,10 @@ GET index/_doc/1 // CONSOLE // TEST[continued] +NOTE: In 7.0, `_doc` represents the endpoint name instead of the document type. +The `_doc` component is a permanent part of the path for the document `index`, +`get`, and `delete` APIs going forward, and will not be removed in 8.0. 
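Editor's note: spelled out (a sketch with an invented index name and id), the typeless document APIs described in that note keep `_doc` as a fixed path component.

[source,groovy]
----
// Illustration only: 7.x typeless document API paths with _doc as a
// permanent path component; 'my-index' and the id are invented.
def index = 'my-index'
def id = '1'
[
    "PUT /${index}/_doc/${id}",    // index with an explicit id
    "POST /${index}/_doc",         // index with an auto-generated id
    "GET /${index}/_doc/${id}",    // get
    "DELETE /${index}/_doc/${id}"  // delete
].each { println it }
----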
+ For API paths that contain both a type and endpoint name like `_update`, in 7.0 the endpoint will immediately follow the index name: diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index 6e02a2f8a78..f206324dad2 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -11,5 +11,6 @@ For information about how to upgrade your cluster, see <>. -- +include::migrate_7_2.asciidoc[] include::migrate_7_1.asciidoc[] include::migrate_7_0.asciidoc[] diff --git a/docs/reference/migration/migrate_7_2.asciidoc b/docs/reference/migration/migrate_7_2.asciidoc new file mode 100644 index 00000000000..d8fee4be582 --- /dev/null +++ b/docs/reference/migration/migrate_7_2.asciidoc @@ -0,0 +1,31 @@ +[[breaking-changes-7.2]] +== Breaking changes in 7.2 +++++ +7.2 +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to Elasticsearch 7.2. + +See also <> and <>. + +coming[7.2.0] + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] + +[[breaking_72_discovery_changes]] +=== Discovery changes + +[float] +==== Only a single port may be given for each seed host. + +In earlier versions you could include a range of ports in entries in the +`discovery.seed_hosts` list, but {es} used only the first port in the range and +unexpectedly ignored the rest. For instance if you set `discovery.seed_hosts: +"10.11.12.13:9300-9310"` then {es} would only use `10.11.12.13:9300` for +discovery. Seed host addresses containing port ranges are now rejected. diff --git a/docs/reference/modules/discovery/bootstrapping.asciidoc b/docs/reference/modules/discovery/bootstrapping.asciidoc index 0ba7d4b17ce..2b17af17ec5 100644 --- a/docs/reference/modules/discovery/bootstrapping.asciidoc +++ b/docs/reference/modules/discovery/bootstrapping.asciidoc @@ -10,11 +10,22 @@ data folder and freshly-started nodes that are joining an existing cluster obtain this information from the cluster's elected master. The initial set of master-eligible nodes is defined in the -<>. This is a list -of the <> or IP addresses of the master-eligible nodes in -the new cluster. If you do not configure `node.name` then it is set to the -node's hostname, so in this case you can use hostnames in -`cluster.initial_master_nodes` too. +<>. This should be +set to a list containing one of the following items for each master-eligible +node: + +- The <> of the node. +- The node's hostname if `node.name` is not set, because `node.name` defaults + to the node's hostname. You must use either the fully-qualified hostname or + the bare hostname <>. +- The IP address of the node's <>, if it is + not possible to use the `node.name` of the node. This is normally the IP + address to which <> resolves but + <>. +- The IP address and port of the node's publish address, in the form `IP:PORT`, + if it is not possible to use the `node.name` of the node and there are + multiple nodes sharing a single IP address. When you start a master-eligible node, you can provide this setting on the command line or in the `elasticsearch.yml` file. After the cluster has formed, @@ -47,9 +58,9 @@ cluster.initial_master_nodes: - master-c -------------------------------------------------- -You can use a mix of IP addresses and node names too. 
If there is more than one -Elasticsearch node with the same IP address then the transport port must also -be given to specify exactly which node is meant: +If it is not possible to use the names of the nodes then you can also use IP +addresses, or IP addresses and ports, or even a mix of IP addresses and node +names: [source,yaml] -------------------------------------------------- @@ -57,7 +68,7 @@ cluster.initial_master_nodes: - 10.0.10.101 - 10.0.10.102:9300 - 10.0.10.102:9301 - - master-node-hostname + - master-node-name -------------------------------------------------- Like all node settings, it is also possible to specify the initial set of master diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index d0f68e37730..3bea925f972 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -52,13 +52,14 @@ There are several thread pools, but the important ones include: Mainly for java client executing of action when listener threaded is set to true. Thread pool type is `scaling` with a default max of `min(10, (# of available processors)/2)`. -Changing a specific thread pool can be done by setting its type-specific parameters; for example, changing the `bulk` -thread pool to have more threads: +Changing a specific thread pool can be done by setting its type-specific +parameters; for example, changing the number of threads in the `write` thread +pool: [source,yaml] -------------------------------------------------- thread_pool: - bulk: + write: size: 30 -------------------------------------------------- @@ -87,7 +88,7 @@ full, it will abort the request. [source,yaml] -------------------------------------------------- thread_pool: - bulk: + write: size: 30 queue_size: 1000 -------------------------------------------------- diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index dc624c19039..9c7553e314c 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -1,100 +1,51 @@ [[query-dsl-exists-query]] === Exists Query -Returns documents that have at least one non-`null` value in the original field: +Returns documents that contain a value other than `null` or `[]` in a provided +field. + +[[exists-query-ex-request]] +==== Example request [source,js] --------------------------------------------------- +---- GET /_search { "query": { - "exists" : { "field" : "user" } + "exists": { + "field": "user" + } } } --------------------------------------------------- +---- // CONSOLE -For instance, these documents would all match the above query: +[[exists-query-top-level-params]] +==== Top-level parameters for `exists` +`field`:: +Name of the field you wish to search. ++ +To return a document, this field must exist and contain a value other +than `null` or `[]`. These values can include: ++ +* Empty strings, such as `""` or `"-"` +* Arrays containing `null` and another value, such as `[null, "foo"]` +* A custom <>, defined in field mapping + +[[exists-query-notes]] +==== Notes + +[[find-docs-null-values]] +===== Find documents with null values +To find documents that contain only `null` values or `[]` in a provided field, +use the `must_not` <> with the `exists` +query. + +The following search returns documents that contain only `null` values or `[]` +in the `user` field. 
[source,js] --------------------------------------------------- -{ "user": "jane" } -{ "user": "" } <1> -{ "user": "-" } <2> -{ "user": ["jane"] } -{ "user": ["jane", null ] } <3> --------------------------------------------------- -// NOTCONSOLE -<1> An empty string is a non-`null` value. -<2> Even though the `standard` analyzer would emit zero tokens, the original field is non-`null`. -<3> At least one non-`null` value is required. - -These documents would *not* match the above query: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [] } <1> -{ "user": [null] } <2> -{ "foo": "bar" } <3> --------------------------------------------------- -// NOTCONSOLE -<1> This field has no values. -<2> At least one non-`null` value is required. -<3> The `user` field is missing completely. - -[float] -[[null-value-mapping]] -==== `null_value` mapping - -If the field mapping includes the <> setting -then explicit `null` values are replaced with the specified `null_value`. For -instance, if the `user` field were mapped as follows: - -[source,js] --------------------------------------------------- -PUT /example -{ - "mappings": { - "properties": { - "user": { - "type": "keyword", - "null_value": "_null_" - } - } - } -} --------------------------------------------------- -// CONSOLE - -then explicit `null` values would be indexed as the string `_null_`, and the -following docs would match the `exists` filter: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [null] } --------------------------------------------------- -// NOTCONSOLE - -However, these docs--without explicit `null` values--would still have -no values in the `user` field and thus would not match the `exists` filter: - -[source,js] --------------------------------------------------- -{ "user": [] } -{ "foo": "bar" } --------------------------------------------------- -// NOTCONSOLE - -[[missing-query]] -==== `missing` query - -There isn't a `missing` query. Instead use the `exists` query inside a -`must_not` clause as follows: - -[source,js] --------------------------------------------------- +---- GET /_search { "query": { @@ -107,7 +58,5 @@ GET /_search } } } --------------------------------------------------- -// CONSOLE - -This query returns documents that have no value in the user field. +---- +// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index 71fa61ee085..60b87baf3d7 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -305,10 +305,14 @@ There are a number of options for the `field_value_factor` function: | Modifier | Meaning | `none` | Do not apply any multiplier to the field value -| `log` | Take the https://en.wikipedia.org/wiki/Common_logarithm[common logarithm] of the field value +| `log` | Take the https://en.wikipedia.org/wiki/Common_logarithm[common logarithm] of the field value. + Because this function will return a negative value and cause an error if used on values + between 0 and 1, it is recommended to use `log1p` instead. 
| `log1p` | Add 1 to the field value and take the common logarithm | `log2p` | Add 2 to the field value and take the common logarithm -| `ln` | Take the https://en.wikipedia.org/wiki/Natural_logarithm[natural logarithm] of the field value +| `ln` | Take the https://en.wikipedia.org/wiki/Natural_logarithm[natural logarithm] of the field value. + Because this function will return a negative value and cause an error if used on values + between 0 and 1, it is recommended to use `ln1p` instead. | `ln1p` | Add 1 to the field value and take the natural logarithm | `ln2p` | Add 2 to the field value and take the natural logarithm | `square` | Square the field value (multiply it by itself) @@ -321,14 +325,17 @@ There are a number of options for the `field_value_factor` function: Value used if the document doesn't have that field. The modifier and factor are still applied to it as though it were read from the document. +NOTE: Scores produced by the `field_value_factor` function must be +non-negative, otherwise an error will be thrown. The `log` and `ln` modifiers +will produce negative values if used on values between 0 and 1. Be sure to limit +the values of the field with a range filter to avoid this, or use `log1p` and +`ln1p`. - Keep in mind that taking the log() of 0, or the square root of a negative number - is an illegal operation, and an exception will be thrown. Be sure to limit the - values of the field with a range filter to avoid this, or use `log1p` and - `ln1p`. +NOTE: Keep in mind that taking the log() of 0, or the square root of a +negative number is an illegal operation, and an exception will be thrown. Be +sure to limit the values of the field with a range filter to avoid this, or use +`log1p` and `ln1p`. - NOTE: Scores produced by the `field_value_score` function must be non-negative, - otherwise an error will be thrown. [[function-decay]] ==== Decay functions diff --git a/docs/reference/query-dsl/term-query.asciidoc b/docs/reference/query-dsl/term-query.asciidoc index 910123bbe61..25da70d0cf1 100644 --- a/docs/reference/query-dsl/term-query.asciidoc +++ b/docs/reference/query-dsl/term-query.asciidoc @@ -1,168 +1,220 @@ [[query-dsl-term-query]] === Term Query -The `term` query finds documents that contain the *exact* term specified -in the inverted index. For instance: +Returns documents that contain an *exact* term in a provided field. + +You can use the `term` query to find documents based on a precise value such as +a price, a product ID, or a username. + +[WARNING] +==== +Avoid using the `term` query for <> fields. + +By default, {es} changes the values of `text` fields as part of <>. This can make finding exact matches for `text` field values +difficult. + +To search `text` field values, use the <> query +instead. +==== + +[[term-query-ex-request]] +==== Example request [source,js] --------------------------------------------------- -POST _search +---- +GET /_search { - "query": { - "term" : { "user" : "Kimchy" } <1> - } -} --------------------------------------------------- -// CONSOLE -<1> Finds documents which contain the exact term `Kimchy` in the inverted index - of the `user` field.
- -A `boost` parameter can be specified to give this `term` query a higher -relevance score than another query, for instance: - -[source,js] --------------------------------------------------- -GET _search -{ - "query": { - "bool": { - "should": [ - { - "term": { - "status": { - "value": "urgent", - "boost": 2.0 <1> + "query": { + "term": { + "user": { + "value": "Kimchy", + "boost": 1.0 } - } - }, - { - "term": { - "status": "normal" <2> - } } - ] } - } } --------------------------------------------------- +---- // CONSOLE -<1> The `urgent` query clause has a boost of `2.0`, meaning it is twice as important - as the query clause for `normal`. -<2> The `normal` clause has the default neutral boost of `1.0`. +[[term-top-level-params]] +==== Top-level parameters for `term` +``:: +Field you wish to search. -A `term` query can also match against <>. +[[term-field-params]] +==== Parameters for `` +`value`:: +Term you wish to find in the provided ``. To return a document, the term +must exactly match the field value, including whitespace and capitalization. -.Why doesn't the `term` query match my document? -************************************************** +`boost`:: +Floating point number used to decrease or increase the +<> of a query. Default is `1.0`. +Optional. ++ +You can use the `boost` parameter to adjust relevance scores for searches +containing two or more queries. ++ +Boost values are relative to the default value of `1.0`. A boost value between +`0` and `1.0` decreases the relevance score. A value greater than `1.0` +increases the relevance score. -String fields can be of type `text` (treated as full text, like the body of an -email), or `keyword` (treated as exact values, like an email address or a -zip code). Exact values (like numbers, dates, and keywords) have -the exact value specified in the field added to the inverted index in order -to make them searchable. +[[term-query-notes]] +==== Notes -However, `text` fields are `analyzed`. This means that their -values are first passed through an <> to produce a list of -terms, which are then added to the inverted index. +[[avoid-term-query-text-fields]] +===== Avoid using the `term` query for `text` fields +By default, {es} changes the values of `text` fields during analysis. For +example, the default <> changes +`text` field values as follows: -There are many ways to analyze text: the default -<> drops most punctuation, -breaks up text into individual words, and lower cases them. For instance, -the `standard` analyzer would turn the string ``Quick Brown Fox!'' into the -terms [`quick`, `brown`, `fox`]. +* Removes most punctuation +* Divides the remaining content into individual words, called +<> +* Lowercases the tokens -This analysis process makes it possible to search for individual words -within a big block of full text. +To better search `text` fields, the `match` query also analyzes your provided +search term before performing a search. This means the `match` query can search +`text` fields for analyzed tokens rather than an exact term. -The `term` query looks for the *exact* term in the field's inverted index -- -it doesn't know anything about the field's analyzer. This makes it useful for -looking up values in keyword fields, or in numeric or date -fields. When querying full text fields, use the -<> instead, which understands how the field -has been analyzed. +The `term` query does *not* analyze the search term. The `term` query only +searches for the *exact* term you provide. 
This means the `term` query may +return poor or no results when searching `text` fields. +To see the difference in search results, try the following example. -To demonstrate, try out the example below. First, create an index, specifying the field mappings, and index a document: +. Create an index with a `text` field called `full_text`. ++ +-- [source,js] --------------------------------------------------- +---- PUT my_index { - "mappings": { - "properties": { - "full_text": { - "type": "text" <1> - }, - "exact_value": { - "type": "keyword" <2> - } + "mappings" : { + "properties" : { + "full_text" : { "type" : "text" } + } } - } } - -PUT my_index/_doc/1 -{ - "full_text": "Quick Foxes!", <3> - "exact_value": "Quick Foxes!" <4> -} --------------------------------------------------- +---- // CONSOLE -<1> The `full_text` field is of type `text` and will be analyzed. -<2> The `exact_value` field is of type `keyword` and will NOT be analyzed. -<3> The `full_text` inverted index will contain the terms: [`quick`, `foxes`]. -<4> The `exact_value` inverted index will contain the exact term: [`Quick Foxes!`]. +-- -Now, compare the results for the `term` query and the `match` query: +. Index a document with a value of `Quick Brown Foxes!` in the `full_text` +field. ++ +-- [source,js] --------------------------------------------------- -GET my_index/_search +---- +PUT my_index/_doc/1 { - "query": { - "term": { - "exact_value": "Quick Foxes!" <1> - } - } + "full_text": "Quick Brown Foxes!" } - -GET my_index/_search -{ - "query": { - "term": { - "full_text": "Quick Foxes!" <2> - } - } -} - -GET my_index/_search -{ - "query": { - "term": { - "full_text": "foxes" <3> - } - } -} - -GET my_index/_search -{ - "query": { - "match": { - "full_text": "Quick Foxes!" <4> - } - } -} --------------------------------------------------- +---- // CONSOLE // TEST[continued] -<1> This query matches because the `exact_value` field contains the exact - term `Quick Foxes!`. -<2> This query does not match, because the `full_text` field only contains - the terms `quick` and `foxes`. It does not contain the exact term - `Quick Foxes!`. -<3> A `term` query for the term `foxes` matches the `full_text` field. -<4> This `match` query on the `full_text` field first analyzes the query string, - then looks for documents containing `quick` or `foxes` or both. -************************************************** +Because `full_text` is a `text` field, {es} changes `Quick Brown Foxes!` to +`[quick, brown, foxes]` during analysis. + +-- + +. Use the `term` query to search for `Quick Brown Foxes!` in the `full_text` +field. Include the `pretty` parameter so the response is more readable. ++ +-- + +[source,js] +---- +GET my_index/_search?pretty +{ + "query": { + "term": { + "full_text": "Quick Brown Foxes!" + } + } +} +---- +// CONSOLE +// TEST[continued] + +Because the `full_text` field no longer contains the *exact* term `Quick Brown +Foxes!`, the `term` query search returns no results. + +-- + +. Use the `match` query to search for `Quick Brown Foxes!` in the `full_text` +field. ++ +-- + +//// + +[source,js] +---- +POST my_index/_refresh +---- +// CONSOLE +// TEST[continued] + +//// + +[source,js] +---- +GET my_index/_search?pretty +{ + "query": { + "match": { + "full_text": "Quick Brown Foxes!" + } + } +} +---- +// CONSOLE +// TEST[continued] + +Unlike the `term` query, the `match` query analyzes your provided search term, +`Quick Brown Foxes!`, before performing a search.
The `match` query then returns +any documents containing the `quick`, `brown`, or `foxes` tokens in the +`full_text` field. + +Here's the response for the `match` query search containing the indexed document +in the results. + +[source,js] +---- +{ + "took" : 1, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 0.8630463, + "hits" : [ + { + "_index" : "my_index", + "_type" : "_doc", + "_id" : "1", + "_score" : 0.8630463, + "_source" : { + "full_text" : "Quick Brown Foxes!" + } + } + ] + } +} +---- +// TESTRESPONSE[s/"took" : 1/"took" : $body.took/] +-- \ No newline at end of file diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 1009c42e8b4..a5b93747dfa 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -9,6 +9,7 @@ directly to configure and access {xpack} features. * <> * <> +* <> * <> * <>, <> * <> diff --git a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc index 2588a49fc72..2bc2300174e 100644 --- a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc +++ b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc @@ -108,6 +108,7 @@ services: image: {docker-image} environment: - node.name=es01 + - discovery.seed_hosts=es02 - cluster.initial_master_nodes=es01,es02 - ELASTIC_PASSWORD=$ELASTIC_PASSWORD <1> - "ES_JAVA_OPTS=-Xms512m -Xmx512m" diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index dd2af0eeda9..426605f63d7 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -1484,10 +1484,15 @@ through the list of URLs will continue until a successful connection is made. [[ssl-tls-settings]] ==== Default values for TLS/SSL settings In general, the values below represent the default values for the various TLS -settings. For more information, see +settings. +The prefixes for these settings are based on the context in which they are +used (e.g. `xpack.security.authc.realms.ldap.corp_ldap.ssl.verification_mode` +or `xpack.security.transport.ssl.supported_protocols`). + +For more information, see {stack-ov}/encrypting-communications.html[Encrypting communications]. -`ssl.supported_protocols`:: +`*.ssl.supported_protocols`:: Supported protocols with versions. Valid protocols: `SSLv2Hello`, `SSLv3`, `TLSv1`, `TLSv1.1`, `TLSv1.2`, `TLSv1.3`. Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`. @@ -1497,7 +1502,7 @@ NOTE: If `xpack.security.fips_mode.enabled` is `true`, you cannot use `SSLv2Hell or `SSLv3`. See <>. -- -`ssl.client_authentication`:: +`*.ssl.client_authentication`:: Controls the server's behavior in regard to requesting a certificate from client connections. Valid values are `required`, `optional`, and `none`. `required` forces a client to present a certificate, while `optional` requests a client certificate but the client is not required to present one. Defaults to `required`, except for HTTP, which defaults to `none`. See <>. -`ssl.verification_mode`:: +`*.ssl.verification_mode`:: Controls the verification of certificates.
Valid values are: - `full`, which verifies that the provided certificate is signed by a trusted authority (CA) and also verifies that the server's hostname (or IP @@ -1520,7 +1525,7 @@ Controls the verification of certificates. Valid values are: + The default value is `full`. -`ssl.cipher_suites`:: +`*.ssl.cipher_suites`:: Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[ Java Cryptography Architecture documentation]. Defaults to `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`, `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`, `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`, `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`, diff --git a/docs/reference/setup/important-settings/discovery-settings.asciidoc b/docs/reference/setup/important-settings/discovery-settings.asciidoc index 5709ae3bb93..245852b2096 100644 --- a/docs/reference/setup/important-settings/discovery-settings.asciidoc +++ b/docs/reference/setup/important-settings/discovery-settings.asciidoc @@ -49,26 +49,23 @@ discovery.seed_hosts: - 192.168.1.10:9300 - 192.168.1.11 <1> - seeds.mydomain.com <2> -cluster.initial_master_nodes: - - master-node-a <3> - - 192.168.1.12 <4> - - 192.168.1.13:9301 <5> +cluster.initial_master_nodes: <3> + - master-node-a + - master-node-b + - master-node-c -------------------------------------------------- <1> The port will default to `transport.profiles.default.port` and fallback to `transport.port` if not specified. <2> If a hostname resolves to multiple IP addresses then the node will attempt to discover other nodes at all resolved addresses. -<3> Initial master nodes can be identified by their <>, - which defaults to the hostname. Make sure that the value in - `cluster.initial_master_nodes` matches the `node.name` exactly. If you use - a fully-qualified domain name such as `master-node-a.example.com` for your - node names then you must use the fully-qualified name in this list; - conversely if `node.name` is a bare hostname without any trailing - qualifiers then you must also omit the trailing qualifiers in - `cluster.initial_master_nodes`. -<4> Initial master nodes can also be identified by their IP address. -<5> If multiple master nodes share an IP address then the transport port must - be used to distinguish between them. +<3> The initial master nodes should be identified by their + <>, which defaults to their hostname. Make sure that + the value in `cluster.initial_master_nodes` matches the `node.name` + exactly. If you use a fully-qualified domain name such as + `master-node-a.example.com` for your node names then you must use the + fully-qualified name in this list; conversely if `node.name` is a bare + hostname without any trailing qualifiers then you must also omit the + trailing qualifiers in `cluster.initial_master_nodes`. For more information, see <> and <>. diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 172c7c1f17c..1fcc261d68e 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -62,6 +62,9 @@ ifeval::["{release-state}"!="unreleased"] docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" {docker-image} -------------------------------------------- +Note the use of <> that allows bypassing +the <> in a single-node development cluster. 
+ endif::[] [[docker-cli-run-prod-mode]] diff --git a/docs/reference/setup/setup-xclient.asciidoc b/docs/reference/setup/setup-xclient.asciidoc index 9da482af849..24cef9c7369 100644 --- a/docs/reference/setup/setup-xclient.asciidoc +++ b/docs/reference/setup/setup-xclient.asciidoc @@ -65,6 +65,7 @@ repositories { // Add the Elasticsearch Maven Repository maven { + name "elastic" url "https://artifacts.elastic.co/maven" } } diff --git a/docs/reference/sql/functions/conditional.asciidoc b/docs/reference/sql/functions/conditional.asciidoc index 1cb028efaec..f57878107b6 100644 --- a/docs/reference/sql/functions/conditional.asciidoc +++ b/docs/reference/sql/functions/conditional.asciidoc @@ -98,6 +98,30 @@ an error message would be returned, mentioning that *'foo'* is of data type *key which does not match the expected data type *integer* (based on result *10*). =============================== +[[sql-functions-conditional-case-groupby-custom-buckets]] +===== Conditional bucketing + +CASE can be used as a GROUP BY key in a query to facilitate custom bucketing +and assign descriptive names to those buckets. If, for example, a key has too +many distinct values, or ranges of those values are more interesting than each +individual value, CASE can create custom buckets as in the +following example: + +[source, sql] +SELECT count(*) AS count, + CASE WHEN NVL(languages, 0) = 0 THEN 'zero' + WHEN languages = 1 THEN 'one' + WHEN languages = 2 THEN 'bilingual' + WHEN languages = 3 THEN 'trilingual' + ELSE 'multilingual' + END as lang_skills +FROM employees +GROUP BY lang_skills +ORDER BY lang_skills; + +With this query, one can create normal grouping buckets for values _0, 1, 2, 3_ with +descriptive names, and every value _>= 4_ falls into the _multilingual_ bucket. + [[sql-functions-conditional-coalesce]] ==== `COALESCE` diff --git a/docs/reference/sql/language/syntax/commands/select.asciidoc b/docs/reference/sql/language/syntax/commands/select.asciidoc index 26fdb2f337e..08ebe0ae964 100644 --- a/docs/reference/sql/language/syntax/commands/select.asciidoc +++ b/docs/reference/sql/language/syntax/commands/select.asciidoc @@ -204,6 +204,10 @@ Multiple aggregates used: include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndMultipleAggs] ---- +[TIP] +If custom bucketing is required, it can be achieved with the use of `<>`, +as shown <>. + [[sql-syntax-group-by-implicit]] ===== Implicit Grouping diff --git a/docs/reference/sql/limitations.asciidoc b/docs/reference/sql/limitations.asciidoc index 8f7868c892e..b9c59e31b3d 100644 --- a/docs/reference/sql/limitations.asciidoc +++ b/docs/reference/sql/limitations.asciidoc @@ -3,6 +3,14 @@ [[sql-limitations]] == SQL Limitations +[float] +[[large-parsing-trees]] +=== Large queries may throw `ParsingException` + +Extremely large queries can consume too much memory during the parsing phase, in which case the {es-sql} engine will +abort parsing and throw an error. In such cases, consider simplifying the query or +splitting it into smaller queries.
+ [float] [[sys-columns-describe-table-nested-fields]] === Nested fields in `SYS COLUMNS` and `DESCRIBE TABLE` diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java b/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java index 2d57faf5cb8..07333aa2eeb 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java @@ -34,6 +34,9 @@ public abstract class BytesWriteHandler implements ReadWriteHandler { return new FlushReadyWrite(context, (ByteBuffer[]) message, listener); } + @Override + public void channelRegistered() {} + @Override public List writeToBytes(WriteOperation writeOperation) { assert writeOperation instanceof FlushReadyWrite : "Write operation must be flush ready"; diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java b/libs/nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java index 2dfd53d27e1..5c3b519e390 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java @@ -27,7 +27,7 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Supplier; +import java.util.function.IntFunction; /** * This is a channel byte buffer composed internally of 16kb pages. When an entire message has been read @@ -37,15 +37,14 @@ import java.util.function.Supplier; */ public final class InboundChannelBuffer implements AutoCloseable { - private static final int PAGE_SIZE = 1 << 14; + public static final int PAGE_SIZE = 1 << 14; private static final int PAGE_MASK = PAGE_SIZE - 1; private static final int PAGE_SHIFT = Integer.numberOfTrailingZeros(PAGE_SIZE); private static final ByteBuffer[] EMPTY_BYTE_BUFFER_ARRAY = new ByteBuffer[0]; private static final Page[] EMPTY_BYTE_PAGE_ARRAY = new Page[0]; - - private final ArrayDeque pages; - private final Supplier pageSupplier; + private final IntFunction pageAllocator; + private final ArrayDeque pages = new ArrayDeque<>(); private final AtomicBoolean isClosed = new AtomicBoolean(false); private long capacity = 0; @@ -53,14 +52,12 @@ public final class InboundChannelBuffer implements AutoCloseable { // The offset is an int as it is the offset of where the bytes begin in the first buffer private int offset = 0; - public InboundChannelBuffer(Supplier pageSupplier) { - this.pageSupplier = pageSupplier; - this.pages = new ArrayDeque<>(); - this.capacity = PAGE_SIZE * pages.size(); + public InboundChannelBuffer(IntFunction pageAllocator) { + this.pageAllocator = pageAllocator; } public static InboundChannelBuffer allocatingInstance() { - return new InboundChannelBuffer(() -> new Page(ByteBuffer.allocate(PAGE_SIZE), () -> {})); + return new InboundChannelBuffer((n) -> new Page(ByteBuffer.allocate(n), () -> {})); } @Override @@ -87,7 +84,7 @@ public final class InboundChannelBuffer implements AutoCloseable { int numPages = numPages(requiredCapacity + offset); int pagesToAdd = numPages - pages.size(); for (int i = 0; i < pagesToAdd; i++) { - Page page = pageSupplier.get(); + Page page = pageAllocator.apply(PAGE_SIZE); pages.addLast(page); } capacity += pagesToAdd * PAGE_SIZE; diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java b/libs/nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java index 6b8688eccfd..92b276ad2d6 100644 --- 
a/libs/nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java @@ -28,6 +28,11 @@ import java.util.function.BiConsumer; */ public interface ReadWriteHandler { + /** + * This method is called when the channel is registered with its selector. + */ + void channelRegistered(); + /** * This method is called when a message is queued with a channel. It can be called from any thread. * This method should validate that the message is a valid type and return a write operation object diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java index 1444422f7a7..22d85472126 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java @@ -20,6 +20,7 @@ package org.elasticsearch.nio; import org.elasticsearch.common.concurrent.CompletableContext; +import org.elasticsearch.nio.utils.ByteBufferUtils; import org.elasticsearch.nio.utils.ExceptionsHelper; import java.io.IOException; @@ -169,6 +170,7 @@ public abstract class SocketChannelContext extends ChannelContext @Override protected void register() throws IOException { super.register(); + readWriteHandler.channelRegistered(); if (allowChannelPredicate.test(channel) == false) { closeNow = true; } @@ -248,26 +250,6 @@ public abstract class SocketChannelContext extends ChannelContext // data that is copied to the buffer for a write, but not successfully flushed immediately, must be // copied again on the next call. - protected int readFromChannel(ByteBuffer buffer) throws IOException { - ByteBuffer ioBuffer = getSelector().getIoBuffer(); - ioBuffer.limit(Math.min(buffer.remaining(), ioBuffer.limit())); - int bytesRead; - try { - bytesRead = rawChannel.read(ioBuffer); - } catch (IOException e) { - closeNow = true; - throw e; - } - if (bytesRead < 0) { - closeNow = true; - return 0; - } else { - ioBuffer.flip(); - buffer.put(ioBuffer); - return bytesRead; - } - } - protected int readFromChannel(InboundChannelBuffer channelBuffer) throws IOException { ByteBuffer ioBuffer = getSelector().getIoBuffer(); int bytesRead; @@ -287,7 +269,7 @@ public abstract class SocketChannelContext extends ChannelContext int j = 0; while (j < buffers.length && ioBuffer.remaining() > 0) { ByteBuffer buffer = buffers[j++]; - copyBytes(ioBuffer, buffer); + ByteBufferUtils.copyBytes(ioBuffer, buffer); } channelBuffer.incrementIndex(bytesRead); return bytesRead; @@ -298,24 +280,6 @@ public abstract class SocketChannelContext extends ChannelContext // copying. 
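// Aside: the hand-rolled copy loops and the single-buffer readFromChannel/flushToChannel overloads
// removed in this change are centralized in the new ByteBufferUtils helper added later in this patch.
// A minimal sketch of its copy semantics; the buffer contents below are illustrative only, not part
// of the patch:
ByteBuffer[] sources = { ByteBuffer.wrap(new byte[]{1, 2, 3}), ByteBuffer.wrap(new byte[]{4, 5}) };
ByteBuffer destination = ByteBuffer.allocate(4);
long copied = ByteBufferUtils.copyBytes(sources, destination);
// copied == 4: all of the first source plus one byte of the second. Each source's limit is restored
// after the copy and its position advanced, so sources[1].remaining() == 1 here.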
private final int WRITE_LIMIT = 1 << 16; - protected int flushToChannel(ByteBuffer buffer) throws IOException { - int initialPosition = buffer.position(); - ByteBuffer ioBuffer = getSelector().getIoBuffer(); - ioBuffer.limit(Math.min(WRITE_LIMIT, ioBuffer.limit())); - copyBytes(buffer, ioBuffer); - ioBuffer.flip(); - int bytesWritten; - try { - bytesWritten = rawChannel.write(ioBuffer); - } catch (IOException e) { - closeNow = true; - buffer.position(initialPosition); - throw e; - } - buffer.position(initialPosition + bytesWritten); - return bytesWritten; - } - protected int flushToChannel(FlushOperation flushOperation) throws IOException { ByteBuffer ioBuffer = getSelector().getIoBuffer(); @@ -324,12 +288,8 @@ public abstract class SocketChannelContext extends ChannelContext while (continueFlush) { ioBuffer.clear(); ioBuffer.limit(Math.min(WRITE_LIMIT, ioBuffer.limit())); - int j = 0; ByteBuffer[] buffers = flushOperation.getBuffersToWrite(WRITE_LIMIT); - while (j < buffers.length && ioBuffer.remaining() > 0) { - ByteBuffer buffer = buffers[j++]; - copyBytes(buffer, ioBuffer); - } + ByteBufferUtils.copyBytes(buffers, ioBuffer); ioBuffer.flip(); int bytesFlushed; try { @@ -344,12 +304,4 @@ public abstract class SocketChannelContext extends ChannelContext } return totalBytesFlushed; } - - private void copyBytes(ByteBuffer from, ByteBuffer to) { - int nBytesToCopy = Math.min(to.remaining(), from.remaining()); - int initialLimit = from.limit(); - from.limit(from.position() + nBytesToCopy); - to.put(from); - from.limit(initialLimit); - } } diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/TaskScheduler.java b/libs/nio/src/main/java/org/elasticsearch/nio/TaskScheduler.java index e197230147c..c460e214798 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/TaskScheduler.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/TaskScheduler.java @@ -45,7 +45,7 @@ public class TaskScheduler { return delayedTask; } - Runnable pollTask(long relativeNanos) { + public Runnable pollTask(long relativeNanos) { DelayedTask task; while ((task = tasks.peek()) != null) { if (relativeNanos - task.deadline >= 0) { diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/utils/ByteBufferUtils.java b/libs/nio/src/main/java/org/elasticsearch/nio/utils/ByteBufferUtils.java new file mode 100644 index 00000000000..0be9806bada --- /dev/null +++ b/libs/nio/src/main/java/org/elasticsearch/nio/utils/ByteBufferUtils.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio.utils; + +import java.nio.ByteBuffer; + +public final class ByteBufferUtils { + + private ByteBufferUtils() {} + + /** + * Copies bytes from the array of byte buffers into the destination buffer. 
The number of bytes copied is + * limited by the bytes available to copy and the space remaining in the destination byte buffer. + * + * @param source byte buffers to copy from + * @param destination byte buffer to copy to + * + * @return number of bytes copied + */ + public static long copyBytes(ByteBuffer[] source, ByteBuffer destination) { + long bytesCopied = 0; + for (int i = 0; i < source.length && destination.hasRemaining(); i++) { + ByteBuffer buffer = source[i]; + bytesCopied += copyBytes(buffer, destination); + } + return bytesCopied; + } + + /** + * Copies bytes from source byte buffer into the destination buffer. The number of bytes copied is + * limited by the bytes available to copy and the space remaining in the destination byte buffer. + * + * @param source byte buffer to copy from + * @param destination byte buffer to copy to + * + * @return number of bytes copied + */ + public static int copyBytes(ByteBuffer source, ByteBuffer destination) { + int nBytesToCopy = Math.min(destination.remaining(), source.remaining()); + int initialLimit = source.limit(); + source.limit(source.position() + nBytesToCopy); + destination.put(source); + source.limit(initialLimit); + return nBytesToCopy; + } +} diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java index f5580430953..49e4fbecec9 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java @@ -19,23 +19,25 @@ package org.elasticsearch.nio; -import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.test.ESTestCase; import java.nio.ByteBuffer; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Supplier; +import java.util.function.IntFunction; public class InboundChannelBufferTests extends ESTestCase { - private static final int PAGE_SIZE = PageCacheRecycler.PAGE_SIZE_IN_BYTES; - private final Supplier defaultPageSupplier = () -> - new Page(ByteBuffer.allocate(PageCacheRecycler.BYTE_PAGE_SIZE), () -> { - }); + private IntFunction defaultPageAllocator; + + @Override + public void setUp() throws Exception { + super.setUp(); + defaultPageAllocator = (n) -> new Page(ByteBuffer.allocate(n), () -> {}); + } public void testNewBufferNoPages() { - InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageSupplier); + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageAllocator); assertEquals(0, channelBuffer.getCapacity()); assertEquals(0, channelBuffer.getRemaining()); @@ -43,107 +45,107 @@ public class InboundChannelBufferTests extends ESTestCase { } public void testExpandCapacity() { - InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageSupplier); + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageAllocator); assertEquals(0, channelBuffer.getCapacity()); assertEquals(0, channelBuffer.getRemaining()); - channelBuffer.ensureCapacity(PAGE_SIZE); + channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE); - assertEquals(PAGE_SIZE, channelBuffer.getCapacity()); - assertEquals(PAGE_SIZE, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE, channelBuffer.getCapacity()); + assertEquals(InboundChannelBuffer.PAGE_SIZE, channelBuffer.getRemaining()); - channelBuffer.ensureCapacity(PAGE_SIZE + 1); + 
channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE + 1); - assertEquals(PAGE_SIZE * 2, channelBuffer.getCapacity()); - assertEquals(PAGE_SIZE * 2, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE * 2, channelBuffer.getCapacity()); + assertEquals(InboundChannelBuffer.PAGE_SIZE * 2, channelBuffer.getRemaining()); } public void testExpandCapacityMultiplePages() { - InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageSupplier); - channelBuffer.ensureCapacity(PAGE_SIZE); + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageAllocator); + channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE); - assertEquals(PAGE_SIZE, channelBuffer.getCapacity()); + assertEquals(InboundChannelBuffer.PAGE_SIZE, channelBuffer.getCapacity()); int multiple = randomInt(80); - channelBuffer.ensureCapacity(PAGE_SIZE + ((multiple * PAGE_SIZE) - randomInt(500))); + channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE + ((multiple * InboundChannelBuffer.PAGE_SIZE) - randomInt(500))); - assertEquals(PAGE_SIZE * (multiple + 1), channelBuffer.getCapacity()); - assertEquals(PAGE_SIZE * (multiple + 1), channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE * (multiple + 1), channelBuffer.getCapacity()); + assertEquals(InboundChannelBuffer.PAGE_SIZE * (multiple + 1), channelBuffer.getRemaining()); } public void testExpandCapacityRespectsOffset() { - InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageSupplier); - channelBuffer.ensureCapacity(PAGE_SIZE); + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageAllocator); + channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE); - assertEquals(PAGE_SIZE, channelBuffer.getCapacity()); - assertEquals(PAGE_SIZE, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE, channelBuffer.getCapacity()); + assertEquals(InboundChannelBuffer.PAGE_SIZE, channelBuffer.getRemaining()); int offset = randomInt(300); channelBuffer.release(offset); - assertEquals(PAGE_SIZE - offset, channelBuffer.getCapacity()); - assertEquals(PAGE_SIZE - offset, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE - offset, channelBuffer.getCapacity()); + assertEquals(InboundChannelBuffer.PAGE_SIZE - offset, channelBuffer.getRemaining()); - channelBuffer.ensureCapacity(PAGE_SIZE + 1); + channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE + 1); - assertEquals(PAGE_SIZE * 2 - offset, channelBuffer.getCapacity()); - assertEquals(PAGE_SIZE * 2 - offset, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE * 2 - offset, channelBuffer.getCapacity()); + assertEquals(InboundChannelBuffer.PAGE_SIZE * 2 - offset, channelBuffer.getRemaining()); } public void testIncrementIndex() { - InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageSupplier); - channelBuffer.ensureCapacity(PAGE_SIZE); + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageAllocator); + channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE); assertEquals(0, channelBuffer.getIndex()); - assertEquals(PAGE_SIZE, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE, channelBuffer.getRemaining()); channelBuffer.incrementIndex(10); assertEquals(10, channelBuffer.getIndex()); - assertEquals(PAGE_SIZE - 10, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE - 10, channelBuffer.getRemaining()); } public void 
testIncrementIndexWithOffset() { - InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageSupplier); - channelBuffer.ensureCapacity(PAGE_SIZE); + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageAllocator); + channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE); assertEquals(0, channelBuffer.getIndex()); - assertEquals(PAGE_SIZE, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE, channelBuffer.getRemaining()); channelBuffer.release(10); - assertEquals(PAGE_SIZE - 10, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE - 10, channelBuffer.getRemaining()); channelBuffer.incrementIndex(10); assertEquals(10, channelBuffer.getIndex()); - assertEquals(PAGE_SIZE - 20, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE - 20, channelBuffer.getRemaining()); channelBuffer.release(2); assertEquals(8, channelBuffer.getIndex()); - assertEquals(PAGE_SIZE - 20, channelBuffer.getRemaining()); + assertEquals(InboundChannelBuffer.PAGE_SIZE - 20, channelBuffer.getRemaining()); } public void testReleaseClosesPages() { ConcurrentLinkedQueue queue = new ConcurrentLinkedQueue<>(); - Supplier supplier = () -> { + IntFunction allocator = (n) -> { AtomicBoolean atomicBoolean = new AtomicBoolean(); queue.add(atomicBoolean); - return new Page(ByteBuffer.allocate(PAGE_SIZE), () -> atomicBoolean.set(true)); + return new Page(ByteBuffer.allocate(n), () -> atomicBoolean.set(true)); }; - InboundChannelBuffer channelBuffer = new InboundChannelBuffer(supplier); - channelBuffer.ensureCapacity(PAGE_SIZE * 4); + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(allocator); + channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE * 4); - assertEquals(PAGE_SIZE * 4, channelBuffer.getCapacity()); + assertEquals(InboundChannelBuffer.PAGE_SIZE * 4, channelBuffer.getCapacity()); assertEquals(4, queue.size()); for (AtomicBoolean closedRef : queue) { assertFalse(closedRef.get()); } - channelBuffer.release(2 * PAGE_SIZE); + channelBuffer.release(2 * InboundChannelBuffer.PAGE_SIZE); - assertEquals(PAGE_SIZE * 2, channelBuffer.getCapacity()); + assertEquals(InboundChannelBuffer.PAGE_SIZE * 2, channelBuffer.getCapacity()); assertTrue(queue.poll().get()); assertTrue(queue.poll().get()); @@ -153,13 +155,13 @@ public class InboundChannelBufferTests extends ESTestCase { public void testClose() { ConcurrentLinkedQueue queue = new ConcurrentLinkedQueue<>(); - Supplier supplier = () -> { + IntFunction allocator = (n) -> { AtomicBoolean atomicBoolean = new AtomicBoolean(); queue.add(atomicBoolean); - return new Page(ByteBuffer.allocate(PAGE_SIZE), () -> atomicBoolean.set(true)); + return new Page(ByteBuffer.allocate(n), () -> atomicBoolean.set(true)); }; - InboundChannelBuffer channelBuffer = new InboundChannelBuffer(supplier); - channelBuffer.ensureCapacity(PAGE_SIZE * 4); + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(allocator); + channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE * 4); assertEquals(4, queue.size()); @@ -178,13 +180,13 @@ public class InboundChannelBufferTests extends ESTestCase { public void testCloseRetainedPages() { ConcurrentLinkedQueue queue = new ConcurrentLinkedQueue<>(); - Supplier supplier = () -> { + IntFunction allocator = (n) -> { AtomicBoolean atomicBoolean = new AtomicBoolean(); queue.add(atomicBoolean); - return new Page(ByteBuffer.allocate(PAGE_SIZE), () -> atomicBoolean.set(true)); + return new Page(ByteBuffer.allocate(n), () -> 
atomicBoolean.set(true)); }; - InboundChannelBuffer channelBuffer = new InboundChannelBuffer(supplier); - channelBuffer.ensureCapacity(PAGE_SIZE * 4); + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(allocator); + channelBuffer.ensureCapacity(InboundChannelBuffer.PAGE_SIZE * 4); assertEquals(4, queue.size()); @@ -192,7 +194,7 @@ public class InboundChannelBufferTests extends ESTestCase { assertFalse(closedRef.get()); } - Page[] pages = channelBuffer.sliceAndRetainPagesTo(PAGE_SIZE * 2); + Page[] pages = channelBuffer.sliceAndRetainPagesTo(InboundChannelBuffer.PAGE_SIZE * 2); pages[1].close(); @@ -220,10 +222,10 @@ public class InboundChannelBufferTests extends ESTestCase { } public void testAccessByteBuffers() { - InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageSupplier); + InboundChannelBuffer channelBuffer = new InboundChannelBuffer(defaultPageAllocator); int pages = randomInt(50) + 5; - channelBuffer.ensureCapacity(pages * PAGE_SIZE); + channelBuffer.ensureCapacity(pages * InboundChannelBuffer.PAGE_SIZE); long capacity = channelBuffer.getCapacity(); diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java index baf7abac79d..0040f70df85 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java @@ -34,8 +34,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; +import java.util.function.IntFunction; import java.util.function.Predicate; -import java.util.function.Supplier; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; @@ -285,8 +285,8 @@ public class SocketChannelContextTests extends ESTestCase { when(channel.getRawChannel()).thenReturn(realChannel); when(channel.isOpen()).thenReturn(true); Runnable closer = mock(Runnable.class); - Supplier pageSupplier = () -> new Page(ByteBuffer.allocate(1 << 14), closer); - InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + IntFunction pageAllocator = (n) -> new Page(ByteBuffer.allocate(n), closer); + InboundChannelBuffer buffer = new InboundChannelBuffer(pageAllocator); buffer.ensureCapacity(1); TestSocketChannelContext context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, buffer); context.closeFromSelector(); @@ -294,29 +294,6 @@ public class SocketChannelContextTests extends ESTestCase { } } - public void testReadToBufferLimitsToPassedBuffer() throws IOException { - ByteBuffer buffer = ByteBuffer.allocate(10); - when(rawChannel.read(any(ByteBuffer.class))).thenAnswer(completelyFillBufferAnswer()); - - int bytesRead = context.readFromChannel(buffer); - assertEquals(bytesRead, 10); - assertEquals(0, buffer.remaining()); - } - - public void testReadToBufferHandlesIOException() throws IOException { - when(rawChannel.read(any(ByteBuffer.class))).thenThrow(new IOException()); - - expectThrows(IOException.class, () -> context.readFromChannel(ByteBuffer.allocate(10))); - assertTrue(context.closeNow()); - } - - public void testReadToBufferHandlesEOF() throws IOException { - when(rawChannel.read(any(ByteBuffer.class))).thenReturn(-1); - - context.readFromChannel(ByteBuffer.allocate(10)); - assertTrue(context.closeNow()); - } - public void 
testReadToChannelBufferWillReadAsMuchAsIOBufferAllows() throws IOException { when(rawChannel.read(any(ByteBuffer.class))).thenAnswer(completelyFillBufferAnswer()); @@ -344,33 +321,6 @@ public class SocketChannelContextTests extends ESTestCase { assertEquals(0, channelBuffer.getIndex()); } - public void testFlushBufferHandlesPartialFlush() throws IOException { - int bytesToConsume = 3; - when(rawChannel.write(any(ByteBuffer.class))).thenAnswer(consumeBufferAnswer(bytesToConsume)); - - ByteBuffer buffer = ByteBuffer.allocate(10); - context.flushToChannel(buffer); - assertEquals(10 - bytesToConsume, buffer.remaining()); - } - - public void testFlushBufferHandlesFullFlush() throws IOException { - int bytesToConsume = 10; - when(rawChannel.write(any(ByteBuffer.class))).thenAnswer(consumeBufferAnswer(bytesToConsume)); - - ByteBuffer buffer = ByteBuffer.allocate(10); - context.flushToChannel(buffer); - assertEquals(0, buffer.remaining()); - } - - public void testFlushBufferHandlesIOException() throws IOException { - when(rawChannel.write(any(ByteBuffer.class))).thenThrow(new IOException()); - - ByteBuffer buffer = ByteBuffer.allocate(10); - expectThrows(IOException.class, () -> context.flushToChannel(buffer)); - assertTrue(context.closeNow()); - assertEquals(10, buffer.remaining()); - } - public void testFlushBuffersHandlesZeroFlush() throws IOException { when(rawChannel.write(any(ByteBuffer.class))).thenAnswer(consumeBufferAnswer(0)); @@ -456,22 +406,14 @@ public class SocketChannelContextTests extends ESTestCase { @Override public int read() throws IOException { - if (randomBoolean()) { - InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); - return readFromChannel(channelBuffer); - } else { - return readFromChannel(ByteBuffer.allocate(10)); - } + InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); + return readFromChannel(channelBuffer); } @Override public void flushChannel() throws IOException { - if (randomBoolean()) { - ByteBuffer[] byteBuffers = {ByteBuffer.allocate(10)}; - flushToChannel(new FlushOperation(byteBuffers, (v, e) -> {})); - } else { - flushToChannel(ByteBuffer.allocate(10)); - } + ByteBuffer[] byteBuffers = {ByteBuffer.allocate(10)}; + flushToChannel(new FlushOperation(byteBuffers, (v, e) -> {})); } @Override diff --git a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java index fbea8d91726..babc3e10e55 100644 --- a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java +++ b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextDocGenerator.java @@ -24,10 +24,12 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.painless.action.PainlessContextClassBindingInfo; import org.elasticsearch.painless.action.PainlessContextClassInfo; import org.elasticsearch.painless.action.PainlessContextConstructorInfo; import org.elasticsearch.painless.action.PainlessContextFieldInfo; import org.elasticsearch.painless.action.PainlessContextInfo; +import org.elasticsearch.painless.action.PainlessContextInstanceBindingInfo; import org.elasticsearch.painless.action.PainlessContextMethodInfo; import java.io.IOException; @@ -69,26 +71,30 @@ public final class ContextDocGenerator { public 
static void main(String[] args) throws IOException { List contextInfos = getContextInfos(); - Set sharedClassInfos = createShared(contextInfos); + Set sharedStaticInfos = createSharedStatics(contextInfos); + Set sharedClassInfos = createSharedClasses(contextInfos); Path rootDir = resetRootDir(); Path sharedDir = createSharedDir(rootDir); - List classInfos = sortClassInfos(new ArrayList<>(sharedClassInfos), Collections.emptySet()); + List staticInfos = sortStaticInfos(Collections.emptySet(), new ArrayList<>(sharedStaticInfos)); + List classInfos = sortClassInfos(Collections.emptySet(), new ArrayList<>(sharedClassInfos)); Map javaNamesToDisplayNames = getDisplayNames(classInfos); - printSharedIndexPage(sharedDir, javaNamesToDisplayNames, classInfos); + printSharedIndexPage(sharedDir, javaNamesToDisplayNames, staticInfos, classInfos); printSharedPackagesPages(sharedDir, javaNamesToDisplayNames, classInfos); Set isSpecialized = new HashSet<>(); for (PainlessContextInfo contextInfo : contextInfos) { - Path contextDir = createContextDir(rootDir, contextInfo); - classInfos = sortClassInfos(new ArrayList<>(contextInfo.getClasses()), sharedClassInfos); + staticInfos = createContextStatics(contextInfo); + staticInfos = sortStaticInfos(sharedStaticInfos, staticInfos); + classInfos = sortClassInfos(sharedClassInfos, new ArrayList<>(contextInfo.getClasses())); - if (classInfos.isEmpty() == false) { + if (staticInfos.isEmpty() == false || classInfos.isEmpty() == false) { + Path contextDir = createContextDir(rootDir, contextInfo); isSpecialized.add(contextInfo); javaNamesToDisplayNames = getDisplayNames(contextInfo.getClasses()); - printContextIndexPage(contextDir, javaNamesToDisplayNames, sharedClassInfos, contextInfo, classInfos); + printContextIndexPage(contextDir, javaNamesToDisplayNames, contextInfo, staticInfos, classInfos); printContextPackagesPages(contextDir, javaNamesToDisplayNames, sharedClassInfos, contextInfo, classInfos); } } @@ -123,12 +129,44 @@ public final class ContextDocGenerator { return contextInfos; } - private static Set createShared(List contextInfos) { + private static Set createSharedStatics(List contextInfos) { + Map staticInfoCounts = new HashMap<>(); + + for (PainlessContextInfo contextInfo : contextInfos) { + for (PainlessContextMethodInfo methodInfo : contextInfo.getImportedMethods()) { + staticInfoCounts.merge(methodInfo, 1, Integer::sum); + } + + for (PainlessContextClassBindingInfo classBindingInfo : contextInfo.getClassBindings()) { + staticInfoCounts.merge(classBindingInfo, 1, Integer::sum); + } + + for (PainlessContextInstanceBindingInfo instanceBindingInfo : contextInfo.getInstanceBindings()) { + staticInfoCounts.merge(instanceBindingInfo, 1, Integer::sum); + } + } + + return staticInfoCounts.entrySet().stream().filter( + e -> e.getValue() == contextInfos.size() + ).map(Map.Entry::getKey).collect(Collectors.toSet()); + } + + private static List createContextStatics(PainlessContextInfo contextInfo) { + List staticInfos = new ArrayList<>(); + + staticInfos.addAll(contextInfo.getImportedMethods()); + staticInfos.addAll(contextInfo.getClassBindings()); + staticInfos.addAll(contextInfo.getInstanceBindings()); + + return staticInfos; + } + + private static Set createSharedClasses(List contextInfos) { Map classInfoCounts = new HashMap<>(); for (PainlessContextInfo contextInfo : contextInfos) { for (PainlessContextClassInfo classInfo : contextInfo.getClasses()) { - classInfoCounts.compute(classInfo, (k, v) -> v == null ? 
1 : v + 1); + classInfoCounts.merge(classInfo, 1, Integer::sum); } } @@ -165,8 +203,8 @@ public final class ContextDocGenerator { stream.println(); } - private static void printSharedIndexPage( - Path sharedDir, Map javaNamesToDisplayNames, List classInfos) throws IOException { + private static void printSharedIndexPage(Path sharedDir, Map javaNamesToDisplayNames, + List staticInfos, List classInfos) throws IOException { Path sharedIndexPath = sharedDir.resolve("index.asciidoc"); @@ -181,13 +219,12 @@ public final class ContextDocGenerator { sharedIndexStream.println(); sharedIndexStream.println("The following API is available in all contexts."); - printIndex(sharedIndexStream, SHARED_HEADER, javaNamesToDisplayNames, Collections.emptySet(), classInfos); + printIndex(sharedIndexStream, SHARED_HEADER, javaNamesToDisplayNames, staticInfos, classInfos); } } private static void printContextIndexPage(Path contextDir, Map javaNamesToDisplayNames, - Set excludes, PainlessContextInfo contextInfo, List classInfos) - throws IOException { + PainlessContextInfo contextInfo, List staticInfos, List classInfos) throws IOException { Path contextIndexPath = contextDir.resolve("index.asciidoc"); @@ -205,34 +242,58 @@ public final class ContextDocGenerator { contextIndexStream.println( "* See the <<" + SHARED_HEADER + ", " + SHARED_NAME + " API>> for further API available in all contexts."); - printIndex(contextIndexStream, getContextHeader(contextInfo), javaNamesToDisplayNames, excludes, classInfos); + printIndex(contextIndexStream, getContextHeader(contextInfo), javaNamesToDisplayNames, staticInfos, classInfos); } } private static void printIndex(PrintStream indexStream, String contextHeader, Map javaNamesToDisplayNames, - Set excludes, List classInfos) { + List staticInfos, List classInfos) { String currentPackageName = null; - for (PainlessContextClassInfo classInfo : classInfos) { - if (excludes.contains(classInfo)) { - continue; + if (staticInfos.isEmpty() == false) { + indexStream.println(); + indexStream.println("==== Static Methods"); + indexStream.println("The following methods are directly callable without a class/instance qualifier. " + + "Note parameters denoted by a (*) are treated as read-only values."); + indexStream.println(); + + for (Object staticInfo : staticInfos) { + if (staticInfo instanceof PainlessContextMethodInfo) { + printMethod(indexStream, javaNamesToDisplayNames, false, (PainlessContextMethodInfo)staticInfo); + } else if (staticInfo instanceof PainlessContextClassBindingInfo) { + printClassBinding(indexStream, javaNamesToDisplayNames, (PainlessContextClassBindingInfo)staticInfo); + } else if (staticInfo instanceof PainlessContextInstanceBindingInfo) { + printInstanceBinding(indexStream, javaNamesToDisplayNames, (PainlessContextInstanceBindingInfo)staticInfo); + } else { + throw new IllegalArgumentException("unexpected static info type"); + } } + } - String classPackageName = classInfo.getName().substring(0, classInfo.getName().lastIndexOf('.')); + if (classInfos.isEmpty() == false) { + indexStream.println(); + indexStream.println("==== Classes By Package"); + indexStream.println("The following classes are available grouped by their respective packages. 
Click on a class " + + "to view details about the available methods and fields."); + indexStream.println(); - if (classPackageName.equals(currentPackageName) == false) { - currentPackageName = classPackageName; + for (PainlessContextClassInfo classInfo : classInfos) { + String classPackageName = classInfo.getName().substring(0, classInfo.getName().lastIndexOf('.')); - indexStream.println(); - indexStream.println("==== " + currentPackageName); - indexStream.println("<<" + getPackageHeader(contextHeader, currentPackageName) + ", " + - "Expand details for " + currentPackageName + ">>"); - indexStream.println(); + if (classPackageName.equals(currentPackageName) == false) { + currentPackageName = classPackageName; + + indexStream.println(); + indexStream.println("==== " + currentPackageName); + indexStream.println("<<" + getPackageHeader(contextHeader, currentPackageName) + ", " + + "Expand details for " + currentPackageName + ">>"); + indexStream.println(); + } + + String className = getType(javaNamesToDisplayNames, classInfo.getName()); + indexStream.println("* <<" + getClassHeader(contextHeader, className) + ", " + className + ">>"); } - - String className = getType(javaNamesToDisplayNames, classInfo.getName()); - indexStream.println("* <<" + getClassHeader(contextHeader, className) + ", " + className + ">>"); } indexStream.println(); @@ -289,8 +350,8 @@ public final class ContextDocGenerator { packagesStream.println(); packagesStream.println("[role=\"exclude\",id=\"" + getPackageHeader(contextHeader, currentPackageName) + "\"]"); packagesStream.println("=== " + contextName + " API for package " + currentPackageName); - packagesStream.println( - "See the <<" + contextHeader + ", " + contextName + " API>> for a high-level overview of all packages."); + packagesStream.println("See the <<" + contextHeader + ", " + contextName + " API>> " + + "for a high-level overview of all packages and classes."); } String className = getType(javaNamesToDisplayNames, classInfo.getName()); @@ -421,6 +482,49 @@ public final class ContextDocGenerator { stream.println(")"); } + private static void printClassBinding( + PrintStream stream, Map javaNamesToDisplayNames, PainlessContextClassBindingInfo classBindingInfo) { + + stream.print("* " + getType(javaNamesToDisplayNames, classBindingInfo.getRtn()) + " " + classBindingInfo.getName() + "("); + + for (int parameterIndex = 0; parameterIndex < classBindingInfo.getParameters().size(); ++parameterIndex) { + // temporary fix to not print org.elasticsearch.script.ScoreScript parameter until + // class instance bindings are created and the information is appropriately added to the context info classes + if ("org.elasticsearch.script.ScoreScript".equals( + getType(javaNamesToDisplayNames, classBindingInfo.getParameters().get(parameterIndex)))) { + continue; + } + + stream.print(getType(javaNamesToDisplayNames, classBindingInfo.getParameters().get(parameterIndex))); + + if (parameterIndex < classBindingInfo.getReadOnly()) { + stream.print(" *"); + } + + if (parameterIndex + 1 < classBindingInfo.getParameters().size()) { + stream.print(", "); + } + } + + stream.println(")"); + } + + private static void printInstanceBinding( + PrintStream stream, Map javaNamesToDisplayNames, PainlessContextInstanceBindingInfo instanceBindingInfo) { + + stream.print("* " + getType(javaNamesToDisplayNames, instanceBindingInfo.getRtn()) + " " + instanceBindingInfo.getName() + "("); + + for (int parameterIndex = 0; parameterIndex < instanceBindingInfo.getParameters().size(); ++parameterIndex) { 
+ stream.print(getType(javaNamesToDisplayNames, instanceBindingInfo.getParameters().get(parameterIndex))); + + if (parameterIndex + 1 < instanceBindingInfo.getParameters().size()) { + stream.print(", "); + } + } + + stream.println(")"); + } + private static void printField( PrintStream stream, Map javaNamesToDisplayNames, boolean isStatic, PainlessContextFieldInfo fieldInfo) { @@ -602,15 +706,50 @@ public final class ContextDocGenerator { return contextNameBuilder.substring(0, contextNameBuilder.length() - 1); } + private static List sortStaticInfos(Set staticExcludes, List staticInfos) { + staticInfos = new ArrayList<>(staticInfos); + staticInfos.removeIf(staticExcludes::contains); + + staticInfos.sort((si1, si2) -> { + String sv1; + String sv2; + + if (si1 instanceof PainlessContextMethodInfo) { + sv1 = ((PainlessContextMethodInfo)si1).getSortValue(); + } else if (si1 instanceof PainlessContextClassBindingInfo) { + sv1 = ((PainlessContextClassBindingInfo)si1).getSortValue(); + } else if (si1 instanceof PainlessContextInstanceBindingInfo) { + sv1 = ((PainlessContextInstanceBindingInfo)si1).getSortValue(); + } else { + throw new IllegalArgumentException("unexpected static info type"); + } + + if (si2 instanceof PainlessContextMethodInfo) { + sv2 = ((PainlessContextMethodInfo)si2).getSortValue(); + } else if (si2 instanceof PainlessContextClassBindingInfo) { + sv2 = ((PainlessContextClassBindingInfo)si2).getSortValue(); + } else if (si2 instanceof PainlessContextInstanceBindingInfo) { + sv2 = ((PainlessContextInstanceBindingInfo)si2).getSortValue(); + } else { + throw new IllegalArgumentException("unexpected static info type"); + } + + return sv1.compareTo(sv2); + }); + + return staticInfos; + } + private static List sortClassInfos( - List classInfos, Set excludes) { + Set classExcludes, List classInfos) { + classInfos = new ArrayList<>(classInfos); classInfos.removeIf(v -> "void".equals(v.getName()) || "boolean".equals(v.getName()) || "byte".equals(v.getName()) || "short".equals(v.getName()) || "char".equals(v.getName()) || "int".equals(v.getName()) || "long".equals(v.getName()) || "float".equals(v.getName()) || "double".equals(v.getName()) || "org.elasticsearch.painless.lookup.def".equals(v.getName()) || - isInternalClass(v.getName()) || excludes.contains(v) + isInternalClass(v.getName()) || classExcludes.contains(v) ); classInfos.sort((c1, c2) -> { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index e4b6b6a07d9..4928e4fd01f 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -186,7 +186,7 @@ public class TransportReindexAction extends HandledTransportAction succeeds("target_multi", "foo")); - assertThat(e.getMessage(), containsString("Alias [target_multi] has more than one indices associated with it [[")); - // The index names can come in either order - assertThat(e.getMessage(), containsString("target")); - assertThat(e.getMessage(), containsString("target2")); + assertThat(e.getMessage(), containsString("no write index is defined for alias [target_multi]. 
The write index may be explicitly " + + "disabled using is_write_index=false or the alias points to multiple indices without one being designated as a " + + "write index")); + } + + public void testTargetIsAliasWithWriteIndexDisabled() { + Exception e = expectThrows(IllegalArgumentException.class, () -> succeeds("target_alias_with_write_index_disabled", "foo")); + assertThat(e.getMessage(), containsString("no write index is defined for alias [target_alias_with_write_index_disabled]. " + + "The write index may be explicitly disabled using is_write_index=false or the alias points to multiple indices without one " + + "being designated as a write index")); + succeeds("qux", "foo"); // writing directly into the index of which this is the alias works though + } + + public void testTargetIsWriteAlias() { + succeeds("target_multi_with_write_index", "foo"); + succeeds("target_multi_with_write_index", "target2_without_write_index"); + fails("target_multi_with_write_index", "target_multi_with_write_index"); + fails("target_multi_with_write_index", "target_with_write_index"); } public void testRemoteInfoSkipsValidation() { @@ -97,7 +115,7 @@ public class ReindexSourceTargetValidationTests extends ESTestCase { private void fails(String target, String... sources) { Exception e = expectThrows(ActionRequestValidationException.class, () -> succeeds(target, sources)); - assertThat(e.getMessage(), containsString("reindex cannot write into an index its reading from [target]")); + assertThat(e.getMessage(), containsString("reindex cannot write into an index its reading from")); } private void succeeds(String target, String... sources) { @@ -110,12 +128,16 @@ public class ReindexSourceTargetValidationTests extends ESTestCase { } private static IndexMetaData index(String name, String... aliases) { + return index(name, null, aliases); + } + + private static IndexMetaData index(String name, @Nullable Boolean writeIndex, String... 
aliases) { IndexMetaData.Builder builder = IndexMetaData.builder(name).settings(Settings.builder() .put("index.version.created", Version.CURRENT.id) .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1)); for (String alias: aliases) { - builder.putAlias(AliasMetaData.builder(alias).build()); + builder.putAlias(AliasMetaData.builder(alias).writeIndex(writeIndex).build()); } return builder.build(); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index c0dc011a06c..356cfa0bbf9 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -45,7 +45,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -59,6 +58,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.AbstractHttpServerTransport; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.HttpReadTimeoutException; import org.elasticsearch.http.HttpServerChannel; import org.elasticsearch.http.netty4.cors.Netty4CorsConfig; import org.elasticsearch.http.netty4.cors.Netty4CorsConfigBuilder; @@ -289,12 +289,9 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { } @Override - protected void onException(HttpChannel channel, Exception cause) { + public void onException(HttpChannel channel, Exception cause) { if (cause instanceof ReadTimeoutException) { - if (logger.isTraceEnabled()) { - logger.trace("Http read timeout {}", channel); - } - CloseableChannel.closeChannel(channel); + super.onException(channel, new HttpReadTimeoutException(readTimeoutMillis, cause)); } else { super.onException(channel, cause); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index 63e38823acb..bc4ebe5672e 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -73,8 +73,8 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.regex.PatternSyntaxException; import java.util.stream.Collectors; @@ -346,7 +346,7 @@ public class Netty4HttpServerTransportTests extends ESTestCase { transport.start(); final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); - AtomicBoolean channelClosed = new AtomicBoolean(false); + CountDownLatch channelClosedLatch = new CountDownLatch(1); Bootstrap clientBootstrap = new 
Bootstrap().channel(NioSocketChannel.class).handler(new ChannelInitializer() { @@ -357,9 +357,9 @@ public class Netty4HttpServerTransportTests extends ESTestCase { } }).group(group); ChannelFuture connect = clientBootstrap.connect(remoteAddress.address()); - connect.channel().closeFuture().addListener(future -> channelClosed.set(true)); + connect.channel().closeFuture().addListener(future -> channelClosedLatch.countDown()); - assertBusy(() -> assertTrue("Channel should be closed due to read timeout", channelClosed.get()), 5, TimeUnit.SECONDS); + assertTrue("Channel should be closed due to read timeout", channelClosedLatch.await(1, TimeUnit.MINUTES)); } finally { group.shutdownGracefully().await(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java index 200c9aa4bbe..71585ea7a4e 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/ESLoggingHandlerIT.java @@ -59,7 +59,7 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + " WRITE: \\d+B"; final MockLogAppender.LoggingExpectation writeExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern); final MockLogAppender.LoggingExpectation flushExpectation = @@ -74,7 +74,7 @@ public class ESLoggingHandlerIT extends ESNetty4IntegTestCase { " READ: \\d+B"; final MockLogAppender.LoggingExpectation readExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern); appender.addExpectation(writeExpectation); diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureSeedHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureSeedHostsProvider.java index d6b5a85b51f..4c527264e23 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureSeedHostsProvider.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureSeedHostsProvider.java @@ -208,8 +208,7 @@ public class AzureSeedHostsProvider implements SeedHostsProvider { } try { - // we only limit to 1 port per address, makes no sense to ping 100 ports - TransportAddress[] addresses = transportService.addressesFromString(networkAddress, 1); + TransportAddress[] addresses = transportService.addressesFromString(networkAddress); for (TransportAddress address : addresses) { logger.trace("adding {}, transport_address {}", networkAddress, address); dynamicHosts.add(address); diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 107d1ecdde3..e2bfad0df93 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -23,7 +23,7 @@ esplugin { } versions << [ - 'aws': '1.11.187' + 'aws': '1.11.505' ] dependencies { diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 deleted file mode 100644 index 
a5293a9bf65..00000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.187.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6f47fcd3c2917bef69dc36aba203c5ea4af9bf24 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.505.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.505.jar.sha1 new file mode 100644 index 00000000000..add5db290e8 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-core-1.11.505.jar.sha1 @@ -0,0 +1 @@ +d19328c227b2b5ad81d137361ebc9cbcd0396465 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 deleted file mode 100644 index 4602436e081..00000000000 --- a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.187.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3e5a8601f3105624674b1a12ca34f453a4b5895 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.505.jar.sha1 b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.505.jar.sha1 new file mode 100644 index 00000000000..857f0888de3 --- /dev/null +++ b/plugins/discovery-ec2/licenses/aws-java-sdk-ec2-1.11.505.jar.sha1 @@ -0,0 +1 @@ +b669b3c90ea9bf73734ab26f0cb30c5c66addf55 \ No newline at end of file diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index 209ab327839..8397549f384 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -20,6 +20,7 @@ import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture +import org.elasticsearch.gradle.test.RestIntegTestTask apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -30,14 +31,6 @@ dependencies { final int ec2NumberOfNodes = 3 -/** A task to start the AmazonEC2Fixture which emulates an EC2 service **/ -task ec2Fixture(type: AntFixture) { - dependsOn compileTestJava - env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" - executable = new File(project.runtimeJavaHome, 'bin/java') - args 'org.elasticsearch.discovery.ec2.AmazonEC2Fixture', baseDir, "${buildDir}/testclusters/integTest-1/config/unicast_hosts.txt" -} - Map expansions = [ 'expected_nodes': ec2NumberOfNodes ] @@ -47,20 +40,71 @@ processTestResources { MavenFilteringHack.filter(it, expansions) } -integTest { - dependsOn ec2Fixture, project(':plugins:discovery-ec2').bundlePlugin +// disable the default test task, use the specialized ones below +integTest.enabled = false + +/* + * Test using various credential providers (see also https://docs.aws.amazon.com/sdk-for-java/v2/developer-guide/credentials.html): + * - Elasticsearch Keystore (secure settings discovery.ec2.access_key and discovery.ec2.secret_key) + * - Java system properties (aws.accessKeyId and aws.secretAccessKey) + * - Environment variables (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY) + * - ECS container credentials (loaded from ECS if the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is set) + * - Instance profile credentials (delivered through the EC2 metadata service) + * + * Notably missing is a test for the default credential profiles file, which is located at ~/.aws/credentials and would at least require a + * custom Java security policy to work.
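+ *
+ * For reference, apart from the Elasticsearch keystore (which is plugin-specific), these flavors
+ * follow the AWS SDK's default credential resolution order: roughly environment variables, then
+ * Java system properties, then the shared profiles file, then container or instance-profile
+ * credentials. A minimal sketch of exercising that default chain directly (assumes
+ * aws-java-sdk-core on the classpath; shown only to illustrate the chain, not the plugin's own
+ * wiring):
+ *
+ *   AWSCredentialsProvider chain = DefaultAWSCredentialsProviderChain.getInstance();
+ *   AWSCredentials credentials = chain.getCredentials();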
+ */ +['KeyStore', 'EnvVariables', 'SystemProperties', 'ContainerCredentials', 'InstanceProfile'].forEach { action -> + AntFixture fixture = tasks.create(name: "ec2Fixture${action}", type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.discovery.ec2.AmazonEC2Fixture', baseDir, "${buildDir}/testclusters/integTest${action}-1/config/unicast_hosts.txt" + } + + tasks.create(name: "integTest${action}", type: RestIntegTestTask) { + dependsOn fixture, project(':plugins:discovery-ec2').bundlePlugin + } + + check.dependsOn("integTest${action}") + + testClusters."integTest${action}" { + numberOfNodes = ec2NumberOfNodes + plugin file(project(':plugins:discovery-ec2').bundlePlugin.archiveFile) + + setting 'discovery.seed_providers', 'ec2' + setting 'network.host', '_ec2_' + setting 'discovery.ec2.endpoint', { "http://${-> fixture.addressAndPort}" } + + systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${-> fixture.addressAndPort}" } + } } -testClusters.integTest { - numberOfNodes = ec2NumberOfNodes - plugin file(project(':plugins:discovery-ec2').bundlePlugin.archiveFile) - +// Extra config for KeyStore +testClusters.integTestKeyStore { keystore 'discovery.ec2.access_key', 'ec2_integration_test_access_key' keystore 'discovery.ec2.secret_key', 'ec2_integration_test_secret_key' - - setting 'discovery.seed_providers', 'ec2' - setting 'network.host', '_ec2_' - setting 'discovery.ec2.endpoint', { "http://${ec2Fixture.addressAndPort}" } - - systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${ec2Fixture.addressAndPort}" } } + +// Extra config for EnvVariables +testClusters.integTestEnvVariables { + environment 'AWS_ACCESS_KEY_ID', 'ec2_integration_test_access_key' + environment 'AWS_SECRET_ACCESS_KEY', 'ec2_integration_test_secret_key' +} + +// Extra config for SystemProperties +testClusters.integTestSystemProperties { + systemProperty 'aws.accessKeyId', 'ec2_integration_test_access_key' + systemProperty 'aws.secretKey', 'ec2_integration_test_secret_key' +} + +// Extra config for ContainerCredentials +ec2FixtureContainerCredentials.env 'ACTIVATE_CONTAINER_CREDENTIALS', true + +testClusters.integTestContainerCredentials { + environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', + { "http://${-> tasks.findByName("ec2FixtureContainerCredentials").addressAndPort}/ecs_credentials_endpoint" } +} + +// Extra config for InstanceProfile +ec2FixtureInstanceProfile.env 'ACTIVATE_INSTANCE_PROFILE', true diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java index 6027bd86159..32abcdc43e6 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java @@ -18,10 +18,12 @@ */ package org.elasticsearch.discovery.ec2; +import com.amazonaws.util.DateUtils; import org.apache.http.NameValuePair; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.utils.URLEncodedUtils; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.rest.RestStatus; import 
org.elasticsearch.test.fixture.AbstractHttpFixture; @@ -34,8 +36,12 @@ import java.io.StringWriter; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.Date; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; import java.util.UUID; +import java.util.concurrent.TimeUnit; import static java.nio.charset.StandardCharsets.UTF_8; @@ -45,10 +51,14 @@ import static java.nio.charset.StandardCharsets.UTF_8; public class AmazonEC2Fixture extends AbstractHttpFixture { private final Path nodes; + private final boolean instanceProfile; + private final boolean containerCredentials; - private AmazonEC2Fixture(final String workingDir, final String nodesUriPath) { + private AmazonEC2Fixture(final String workingDir, final String nodesUriPath, boolean instanceProfile, boolean containerCredentials) { super(workingDir); this.nodes = toPath(Objects.requireNonNull(nodesUriPath)); + this.instanceProfile = instanceProfile; + this.containerCredentials = containerCredentials; } public static void main(String[] args) throws Exception { @@ -56,7 +66,10 @@ public class AmazonEC2Fixture extends AbstractHttpFixture { throw new IllegalArgumentException("AmazonEC2Fixture <workingDir> <nodesUriPath>"); } - final AmazonEC2Fixture fixture = new AmazonEC2Fixture(args[0], args[1]); + boolean instanceProfile = Booleans.parseBoolean(System.getenv("ACTIVATE_INSTANCE_PROFILE"), false); + boolean containerCredentials = Booleans.parseBoolean(System.getenv("ACTIVATE_CONTAINER_CREDENTIALS"), false); + + final AmazonEC2Fixture fixture = new AmazonEC2Fixture(args[0], args[1], instanceProfile, containerCredentials); fixture.listen(); } @@ -65,6 +78,12 @@ public class AmazonEC2Fixture extends AbstractHttpFixture { if ("/".equals(request.getPath()) && (HttpPost.METHOD_NAME.equals(request.getMethod()))) { final String userAgent = request.getHeader("User-Agent"); if (userAgent != null && userAgent.startsWith("aws-sdk-java")) { + + final String auth = request.getHeader("Authorization"); + if (auth == null || auth.contains("ec2_integration_test_access_key") == false) { + throw new IllegalArgumentException("wrong access key: " + auth); + } + // Simulate an EC2 DescribeInstancesResponse byte[] responseBody = EMPTY_BYTE; for (NameValuePair parse : URLEncodedUtils.parse(new String(request.getBody(), UTF_8), UTF_8)) { @@ -79,6 +98,32 @@ if ("/latest/meta-data/local-ipv4".equals(request.getPath()) && (HttpGet.METHOD_NAME.equals(request.getMethod()))) { return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, "127.0.0.1".getBytes(UTF_8)); } + + if (instanceProfile && + "/latest/meta-data/iam/security-credentials/".equals(request.getPath()) && + HttpGet.METHOD_NAME.equals(request.getMethod())) { + final Map<String, String> headers = new HashMap<>(contentType("text/plain")); + return new Response(RestStatus.OK.getStatus(), headers, "my_iam_profile".getBytes(UTF_8)); + } + + if ((containerCredentials && + "/ecs_credentials_endpoint".equals(request.getPath()) && + HttpGet.METHOD_NAME.equals(request.getMethod())) || + ("/latest/meta-data/iam/security-credentials/my_iam_profile".equals(request.getPath()) && + HttpGet.METHOD_NAME.equals(request.getMethod()))) { + final Date expiration = new Date(new Date().getTime() + TimeUnit.DAYS.toMillis(1)); + final String response = "{" + + "\"AccessKeyId\": \"" + "ec2_integration_test_access_key" + "\"," + + "\"Expiration\": \"" + DateUtils.formatISO8601Date(expiration) + "\"," + + "\"RoleArn\": \"" + "test" + "\","
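 +                // Only AccessKeyId is actually checked by this fixture: the Authorization header sent to "/" must
 +                // contain it. The remaining fields are dummies, and Expiration above is set a day ahead so the SDK
 +                // should treat these temporary credentials as fresh for the duration of the test.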
+ + "\"SecretAccessKey\": \"" + "test" + "\"," + + "\"Token\": \"" + "test" + "\"" + + "}"; + + final Map<String, String> headers = new HashMap<>(contentType("application/json")); + return new Response(RestStatus.OK.getStatus(), headers, response.getBytes(UTF_8)); + } + return null; } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2SeedHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2SeedHostsProvider.java index 97b7ade49f0..515aef8408b 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2SeedHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2SeedHostsProvider.java @@ -174,8 +174,7 @@ class AwsEc2SeedHostsProvider implements SeedHostsProvider { } if (address != null) { try { - // we only limit to 1 port per address, makes no sense to ping 100 ports - final TransportAddress[] addresses = transportService.addressesFromString(address, 1); + final TransportAddress[] addresses = transportService.addressesFromString(address); for (int i = 0; i < addresses.length; i++) { logger.trace("adding {}, address {}, transport_address {}", instance.getInstanceId(), address, addresses[i]); dynamicHosts.add(addresses[i]); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index 5f384c04912..739b964925c 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -22,13 +22,12 @@ package org.elasticsearch.discovery.ec2; import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import com.amazonaws.http.IdleConnectionReaper; -import com.amazonaws.internal.StaticCredentialsProvider; import com.amazonaws.retry.RetryPolicy; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2Client; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; @@ -97,11 +96,11 @@ class AwsEc2ServiceImpl implements AwsEc2Service { static AWSCredentialsProvider buildCredentials(Logger logger, Ec2ClientSettings clientSettings) { final AWSCredentials credentials = clientSettings.credentials; if (credentials == null) { - logger.debug("Using either environment variables, system properties or instance profile credentials"); - return new DefaultAWSCredentialsProviderChain(); + logger.debug("Using default provider chain"); + return DefaultAWSCredentialsProviderChain.getInstance(); } else { logger.debug("Using basic key/secret credentials"); - return new StaticCredentialsProvider(credentials); + return new AWSStaticCredentialsProvider(credentials); } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java index f1c373ee33a..3135769df5f 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Mock.java @@ -20,37 +20,48 @@ import package 
org.elasticsearch.discovery.ec2; import com.amazonaws.AmazonClientException; -import com.amazonaws.AmazonServiceException; import com.amazonaws.AmazonWebServiceRequest; import com.amazonaws.ClientConfiguration; import com.amazonaws.ResponseMetadata; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.regions.Region; import com.amazonaws.services.ec2.AmazonEC2; +import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteRequest; +import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteResult; +import com.amazonaws.services.ec2.model.AcceptTransitGatewayVpcAttachmentRequest; +import com.amazonaws.services.ec2.model.AcceptTransitGatewayVpcAttachmentResult; +import com.amazonaws.services.ec2.model.AcceptVpcEndpointConnectionsRequest; +import com.amazonaws.services.ec2.model.AcceptVpcEndpointConnectionsResult; import com.amazonaws.services.ec2.model.AcceptVpcPeeringConnectionRequest; import com.amazonaws.services.ec2.model.AcceptVpcPeeringConnectionResult; +import com.amazonaws.services.ec2.model.AdvertiseByoipCidrRequest; +import com.amazonaws.services.ec2.model.AdvertiseByoipCidrResult; import com.amazonaws.services.ec2.model.AllocateAddressRequest; import com.amazonaws.services.ec2.model.AllocateAddressResult; import com.amazonaws.services.ec2.model.AllocateHostsRequest; import com.amazonaws.services.ec2.model.AllocateHostsResult; -import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesRequest; -import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesResult; +import com.amazonaws.services.ec2.model.ApplySecurityGroupsToClientVpnTargetNetworkRequest; +import com.amazonaws.services.ec2.model.ApplySecurityGroupsToClientVpnTargetNetworkResult; import com.amazonaws.services.ec2.model.AssignIpv6AddressesRequest; import com.amazonaws.services.ec2.model.AssignIpv6AddressesResult; +import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesRequest; +import com.amazonaws.services.ec2.model.AssignPrivateIpAddressesResult; import com.amazonaws.services.ec2.model.AssociateAddressRequest; import com.amazonaws.services.ec2.model.AssociateAddressResult; -import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockRequest; -import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockResult; -import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockRequest; -import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockResult; -import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileRequest; -import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileResult; -import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteRequest; -import com.amazonaws.services.ec2.model.AcceptReservedInstancesExchangeQuoteResult; +import com.amazonaws.services.ec2.model.AssociateClientVpnTargetNetworkRequest; +import com.amazonaws.services.ec2.model.AssociateClientVpnTargetNetworkResult; import com.amazonaws.services.ec2.model.AssociateDhcpOptionsRequest; import com.amazonaws.services.ec2.model.AssociateDhcpOptionsResult; +import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileRequest; +import com.amazonaws.services.ec2.model.AssociateIamInstanceProfileResult; import com.amazonaws.services.ec2.model.AssociateRouteTableRequest; import com.amazonaws.services.ec2.model.AssociateRouteTableResult; +import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockRequest; +import com.amazonaws.services.ec2.model.AssociateSubnetCidrBlockResult; +import 
com.amazonaws.services.ec2.model.AssociateTransitGatewayRouteTableRequest; +import com.amazonaws.services.ec2.model.AssociateTransitGatewayRouteTableResult; +import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockRequest; +import com.amazonaws.services.ec2.model.AssociateVpcCidrBlockResult; import com.amazonaws.services.ec2.model.AttachClassicLinkVpcRequest; import com.amazonaws.services.ec2.model.AttachClassicLinkVpcResult; import com.amazonaws.services.ec2.model.AttachInternetGatewayRequest; @@ -61,6 +72,8 @@ import com.amazonaws.services.ec2.model.AttachVolumeRequest; import com.amazonaws.services.ec2.model.AttachVolumeResult; import com.amazonaws.services.ec2.model.AttachVpnGatewayRequest; import com.amazonaws.services.ec2.model.AttachVpnGatewayResult; +import com.amazonaws.services.ec2.model.AuthorizeClientVpnIngressRequest; +import com.amazonaws.services.ec2.model.AuthorizeClientVpnIngressResult; import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupEgressRequest; import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupEgressResult; import com.amazonaws.services.ec2.model.AuthorizeSecurityGroupIngressRequest; @@ -69,6 +82,8 @@ import com.amazonaws.services.ec2.model.BundleInstanceRequest; import com.amazonaws.services.ec2.model.BundleInstanceResult; import com.amazonaws.services.ec2.model.CancelBundleTaskRequest; import com.amazonaws.services.ec2.model.CancelBundleTaskResult; +import com.amazonaws.services.ec2.model.CancelCapacityReservationRequest; +import com.amazonaws.services.ec2.model.CancelCapacityReservationResult; import com.amazonaws.services.ec2.model.CancelConversionTaskRequest; import com.amazonaws.services.ec2.model.CancelConversionTaskResult; import com.amazonaws.services.ec2.model.CancelExportTaskRequest; @@ -83,24 +98,34 @@ import com.amazonaws.services.ec2.model.CancelSpotInstanceRequestsRequest; import com.amazonaws.services.ec2.model.CancelSpotInstanceRequestsResult; import com.amazonaws.services.ec2.model.ConfirmProductInstanceRequest; import com.amazonaws.services.ec2.model.ConfirmProductInstanceResult; +import com.amazonaws.services.ec2.model.CopyFpgaImageRequest; +import com.amazonaws.services.ec2.model.CopyFpgaImageResult; import com.amazonaws.services.ec2.model.CopyImageRequest; import com.amazonaws.services.ec2.model.CopyImageResult; import com.amazonaws.services.ec2.model.CopySnapshotRequest; import com.amazonaws.services.ec2.model.CopySnapshotResult; +import com.amazonaws.services.ec2.model.CreateCapacityReservationRequest; +import com.amazonaws.services.ec2.model.CreateCapacityReservationResult; +import com.amazonaws.services.ec2.model.CreateClientVpnEndpointRequest; +import com.amazonaws.services.ec2.model.CreateClientVpnEndpointResult; +import com.amazonaws.services.ec2.model.CreateClientVpnRouteRequest; +import com.amazonaws.services.ec2.model.CreateClientVpnRouteResult; import com.amazonaws.services.ec2.model.CreateCustomerGatewayRequest; -import com.amazonaws.services.ec2.model.CreateDefaultVpcResult; -import com.amazonaws.services.ec2.model.CreateDefaultVpcRequest; import com.amazonaws.services.ec2.model.CreateCustomerGatewayResult; +import com.amazonaws.services.ec2.model.CreateDefaultSubnetRequest; +import com.amazonaws.services.ec2.model.CreateDefaultSubnetResult; +import com.amazonaws.services.ec2.model.CreateDefaultVpcRequest; +import com.amazonaws.services.ec2.model.CreateDefaultVpcResult; import com.amazonaws.services.ec2.model.CreateDhcpOptionsRequest; import com.amazonaws.services.ec2.model.CreateDhcpOptionsResult; 
import com.amazonaws.services.ec2.model.CreateEgressOnlyInternetGatewayRequest; import com.amazonaws.services.ec2.model.CreateEgressOnlyInternetGatewayResult; -import com.amazonaws.services.ec2.model.CreateFpgaImageRequest; -import com.amazonaws.services.ec2.model.CreateFpgaImageResult; -import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionRequest; -import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionResult; +import com.amazonaws.services.ec2.model.CreateFleetRequest; +import com.amazonaws.services.ec2.model.CreateFleetResult; import com.amazonaws.services.ec2.model.CreateFlowLogsRequest; import com.amazonaws.services.ec2.model.CreateFlowLogsResult; +import com.amazonaws.services.ec2.model.CreateFpgaImageRequest; +import com.amazonaws.services.ec2.model.CreateFpgaImageResult; import com.amazonaws.services.ec2.model.CreateImageRequest; import com.amazonaws.services.ec2.model.CreateImageResult; import com.amazonaws.services.ec2.model.CreateInstanceExportTaskRequest; @@ -109,12 +134,18 @@ import com.amazonaws.services.ec2.model.CreateInternetGatewayRequest; import com.amazonaws.services.ec2.model.CreateInternetGatewayResult; import com.amazonaws.services.ec2.model.CreateKeyPairRequest; import com.amazonaws.services.ec2.model.CreateKeyPairResult; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateRequest; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateResult; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateVersionRequest; +import com.amazonaws.services.ec2.model.CreateLaunchTemplateVersionResult; import com.amazonaws.services.ec2.model.CreateNatGatewayRequest; import com.amazonaws.services.ec2.model.CreateNatGatewayResult; import com.amazonaws.services.ec2.model.CreateNetworkAclEntryRequest; import com.amazonaws.services.ec2.model.CreateNetworkAclEntryResult; import com.amazonaws.services.ec2.model.CreateNetworkAclRequest; import com.amazonaws.services.ec2.model.CreateNetworkAclResult; +import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionRequest; +import com.amazonaws.services.ec2.model.CreateNetworkInterfacePermissionResult; import com.amazonaws.services.ec2.model.CreateNetworkInterfaceRequest; import com.amazonaws.services.ec2.model.CreateNetworkInterfaceResult; import com.amazonaws.services.ec2.model.CreatePlacementGroupRequest; @@ -135,10 +166,22 @@ import com.amazonaws.services.ec2.model.CreateSubnetRequest; import com.amazonaws.services.ec2.model.CreateSubnetResult; import com.amazonaws.services.ec2.model.CreateTagsRequest; import com.amazonaws.services.ec2.model.CreateTagsResult; +import com.amazonaws.services.ec2.model.CreateTransitGatewayRequest; +import com.amazonaws.services.ec2.model.CreateTransitGatewayResult; +import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteRequest; +import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteResult; +import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteTableRequest; +import com.amazonaws.services.ec2.model.CreateTransitGatewayRouteTableResult; +import com.amazonaws.services.ec2.model.CreateTransitGatewayVpcAttachmentRequest; +import com.amazonaws.services.ec2.model.CreateTransitGatewayVpcAttachmentResult; import com.amazonaws.services.ec2.model.CreateVolumeRequest; import com.amazonaws.services.ec2.model.CreateVolumeResult; +import com.amazonaws.services.ec2.model.CreateVpcEndpointConnectionNotificationRequest; +import com.amazonaws.services.ec2.model.CreateVpcEndpointConnectionNotificationResult; import 
com.amazonaws.services.ec2.model.CreateVpcEndpointRequest; import com.amazonaws.services.ec2.model.CreateVpcEndpointResult; +import com.amazonaws.services.ec2.model.CreateVpcEndpointServiceConfigurationRequest; +import com.amazonaws.services.ec2.model.CreateVpcEndpointServiceConfigurationResult; import com.amazonaws.services.ec2.model.CreateVpcPeeringConnectionRequest; import com.amazonaws.services.ec2.model.CreateVpcPeeringConnectionResult; import com.amazonaws.services.ec2.model.CreateVpcRequest; @@ -149,26 +192,38 @@ import com.amazonaws.services.ec2.model.CreateVpnConnectionRouteRequest; import com.amazonaws.services.ec2.model.CreateVpnConnectionRouteResult; import com.amazonaws.services.ec2.model.CreateVpnGatewayRequest; import com.amazonaws.services.ec2.model.CreateVpnGatewayResult; +import com.amazonaws.services.ec2.model.DeleteClientVpnEndpointRequest; +import com.amazonaws.services.ec2.model.DeleteClientVpnEndpointResult; +import com.amazonaws.services.ec2.model.DeleteClientVpnRouteRequest; +import com.amazonaws.services.ec2.model.DeleteClientVpnRouteResult; import com.amazonaws.services.ec2.model.DeleteCustomerGatewayRequest; import com.amazonaws.services.ec2.model.DeleteCustomerGatewayResult; import com.amazonaws.services.ec2.model.DeleteDhcpOptionsRequest; import com.amazonaws.services.ec2.model.DeleteDhcpOptionsResult; import com.amazonaws.services.ec2.model.DeleteEgressOnlyInternetGatewayRequest; import com.amazonaws.services.ec2.model.DeleteEgressOnlyInternetGatewayResult; -import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionRequest; -import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionResult; +import com.amazonaws.services.ec2.model.DeleteFleetsRequest; +import com.amazonaws.services.ec2.model.DeleteFleetsResult; import com.amazonaws.services.ec2.model.DeleteFlowLogsRequest; import com.amazonaws.services.ec2.model.DeleteFlowLogsResult; +import com.amazonaws.services.ec2.model.DeleteFpgaImageRequest; +import com.amazonaws.services.ec2.model.DeleteFpgaImageResult; import com.amazonaws.services.ec2.model.DeleteInternetGatewayRequest; import com.amazonaws.services.ec2.model.DeleteInternetGatewayResult; import com.amazonaws.services.ec2.model.DeleteKeyPairRequest; import com.amazonaws.services.ec2.model.DeleteKeyPairResult; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateRequest; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateResult; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsRequest; +import com.amazonaws.services.ec2.model.DeleteLaunchTemplateVersionsResult; import com.amazonaws.services.ec2.model.DeleteNatGatewayRequest; import com.amazonaws.services.ec2.model.DeleteNatGatewayResult; import com.amazonaws.services.ec2.model.DeleteNetworkAclEntryRequest; import com.amazonaws.services.ec2.model.DeleteNetworkAclEntryResult; import com.amazonaws.services.ec2.model.DeleteNetworkAclRequest; import com.amazonaws.services.ec2.model.DeleteNetworkAclResult; +import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionRequest; +import com.amazonaws.services.ec2.model.DeleteNetworkInterfacePermissionResult; import com.amazonaws.services.ec2.model.DeleteNetworkInterfaceRequest; import com.amazonaws.services.ec2.model.DeleteNetworkInterfaceResult; import com.amazonaws.services.ec2.model.DeletePlacementGroupRequest; @@ -187,8 +242,20 @@ import com.amazonaws.services.ec2.model.DeleteSubnetRequest; import com.amazonaws.services.ec2.model.DeleteSubnetResult; import 
com.amazonaws.services.ec2.model.DeleteTagsRequest; import com.amazonaws.services.ec2.model.DeleteTagsResult; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayRequest; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayResult; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteRequest; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteResult; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteTableRequest; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayRouteTableResult; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayVpcAttachmentRequest; +import com.amazonaws.services.ec2.model.DeleteTransitGatewayVpcAttachmentResult; import com.amazonaws.services.ec2.model.DeleteVolumeRequest; import com.amazonaws.services.ec2.model.DeleteVolumeResult; +import com.amazonaws.services.ec2.model.DeleteVpcEndpointConnectionNotificationsRequest; +import com.amazonaws.services.ec2.model.DeleteVpcEndpointConnectionNotificationsResult; +import com.amazonaws.services.ec2.model.DeleteVpcEndpointServiceConfigurationsRequest; +import com.amazonaws.services.ec2.model.DeleteVpcEndpointServiceConfigurationsResult; import com.amazonaws.services.ec2.model.DeleteVpcEndpointsRequest; import com.amazonaws.services.ec2.model.DeleteVpcEndpointsResult; import com.amazonaws.services.ec2.model.DeleteVpcPeeringConnectionRequest; @@ -201,18 +268,36 @@ import com.amazonaws.services.ec2.model.DeleteVpnConnectionRouteRequest; import com.amazonaws.services.ec2.model.DeleteVpnConnectionRouteResult; import com.amazonaws.services.ec2.model.DeleteVpnGatewayRequest; import com.amazonaws.services.ec2.model.DeleteVpnGatewayResult; +import com.amazonaws.services.ec2.model.DeprovisionByoipCidrRequest; +import com.amazonaws.services.ec2.model.DeprovisionByoipCidrResult; import com.amazonaws.services.ec2.model.DeregisterImageRequest; import com.amazonaws.services.ec2.model.DeregisterImageResult; import com.amazonaws.services.ec2.model.DescribeAccountAttributesRequest; import com.amazonaws.services.ec2.model.DescribeAccountAttributesResult; import com.amazonaws.services.ec2.model.DescribeAddressesRequest; import com.amazonaws.services.ec2.model.DescribeAddressesResult; +import com.amazonaws.services.ec2.model.DescribeAggregateIdFormatRequest; +import com.amazonaws.services.ec2.model.DescribeAggregateIdFormatResult; import com.amazonaws.services.ec2.model.DescribeAvailabilityZonesRequest; import com.amazonaws.services.ec2.model.DescribeAvailabilityZonesResult; import com.amazonaws.services.ec2.model.DescribeBundleTasksRequest; import com.amazonaws.services.ec2.model.DescribeBundleTasksResult; +import com.amazonaws.services.ec2.model.DescribeByoipCidrsRequest; +import com.amazonaws.services.ec2.model.DescribeByoipCidrsResult; +import com.amazonaws.services.ec2.model.DescribeCapacityReservationsRequest; +import com.amazonaws.services.ec2.model.DescribeCapacityReservationsResult; import com.amazonaws.services.ec2.model.DescribeClassicLinkInstancesRequest; import com.amazonaws.services.ec2.model.DescribeClassicLinkInstancesResult; +import com.amazonaws.services.ec2.model.DescribeClientVpnAuthorizationRulesRequest; +import com.amazonaws.services.ec2.model.DescribeClientVpnAuthorizationRulesResult; +import com.amazonaws.services.ec2.model.DescribeClientVpnConnectionsRequest; +import com.amazonaws.services.ec2.model.DescribeClientVpnConnectionsResult; +import com.amazonaws.services.ec2.model.DescribeClientVpnEndpointsRequest; +import 
com.amazonaws.services.ec2.model.DescribeClientVpnEndpointsResult; +import com.amazonaws.services.ec2.model.DescribeClientVpnRoutesRequest; +import com.amazonaws.services.ec2.model.DescribeClientVpnRoutesResult; +import com.amazonaws.services.ec2.model.DescribeClientVpnTargetNetworksRequest; +import com.amazonaws.services.ec2.model.DescribeClientVpnTargetNetworksResult; import com.amazonaws.services.ec2.model.DescribeConversionTasksRequest; import com.amazonaws.services.ec2.model.DescribeConversionTasksResult; import com.amazonaws.services.ec2.model.DescribeCustomerGatewaysRequest; @@ -221,26 +306,34 @@ import com.amazonaws.services.ec2.model.DescribeDhcpOptionsRequest; import com.amazonaws.services.ec2.model.DescribeDhcpOptionsResult; import com.amazonaws.services.ec2.model.DescribeEgressOnlyInternetGatewaysRequest; import com.amazonaws.services.ec2.model.DescribeEgressOnlyInternetGatewaysResult; -import com.amazonaws.services.ec2.model.DescribeExportTasksRequest; -import com.amazonaws.services.ec2.model.DescribeExportTasksResult; import com.amazonaws.services.ec2.model.DescribeElasticGpusRequest; import com.amazonaws.services.ec2.model.DescribeElasticGpusResult; +import com.amazonaws.services.ec2.model.DescribeExportTasksRequest; +import com.amazonaws.services.ec2.model.DescribeExportTasksResult; +import com.amazonaws.services.ec2.model.DescribeFleetHistoryRequest; +import com.amazonaws.services.ec2.model.DescribeFleetHistoryResult; +import com.amazonaws.services.ec2.model.DescribeFleetInstancesRequest; +import com.amazonaws.services.ec2.model.DescribeFleetInstancesResult; +import com.amazonaws.services.ec2.model.DescribeFleetsRequest; +import com.amazonaws.services.ec2.model.DescribeFleetsResult; +import com.amazonaws.services.ec2.model.DescribeFlowLogsRequest; +import com.amazonaws.services.ec2.model.DescribeFlowLogsResult; +import com.amazonaws.services.ec2.model.DescribeFpgaImageAttributeRequest; +import com.amazonaws.services.ec2.model.DescribeFpgaImageAttributeResult; import com.amazonaws.services.ec2.model.DescribeFpgaImagesRequest; import com.amazonaws.services.ec2.model.DescribeFpgaImagesResult; import com.amazonaws.services.ec2.model.DescribeHostReservationOfferingsRequest; import com.amazonaws.services.ec2.model.DescribeHostReservationOfferingsResult; import com.amazonaws.services.ec2.model.DescribeHostReservationsRequest; import com.amazonaws.services.ec2.model.DescribeHostReservationsResult; -import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatRequest; -import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatResult; -import com.amazonaws.services.ec2.model.DescribeFlowLogsRequest; -import com.amazonaws.services.ec2.model.DescribeFlowLogsResult; import com.amazonaws.services.ec2.model.DescribeHostsRequest; import com.amazonaws.services.ec2.model.DescribeHostsResult; import com.amazonaws.services.ec2.model.DescribeIamInstanceProfileAssociationsRequest; import com.amazonaws.services.ec2.model.DescribeIamInstanceProfileAssociationsResult; import com.amazonaws.services.ec2.model.DescribeIdFormatRequest; import com.amazonaws.services.ec2.model.DescribeIdFormatResult; +import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatRequest; +import com.amazonaws.services.ec2.model.DescribeIdentityIdFormatResult; import com.amazonaws.services.ec2.model.DescribeImageAttributeRequest; import com.amazonaws.services.ec2.model.DescribeImageAttributeResult; import com.amazonaws.services.ec2.model.DescribeImagesRequest; @@ -251,6 +344,8 @@ import 
com.amazonaws.services.ec2.model.DescribeImportSnapshotTasksRequest; import com.amazonaws.services.ec2.model.DescribeImportSnapshotTasksResult; import com.amazonaws.services.ec2.model.DescribeInstanceAttributeRequest; import com.amazonaws.services.ec2.model.DescribeInstanceAttributeResult; +import com.amazonaws.services.ec2.model.DescribeInstanceCreditSpecificationsRequest; +import com.amazonaws.services.ec2.model.DescribeInstanceCreditSpecificationsResult; import com.amazonaws.services.ec2.model.DescribeInstanceStatusRequest; import com.amazonaws.services.ec2.model.DescribeInstanceStatusResult; import com.amazonaws.services.ec2.model.DescribeInstancesRequest; @@ -259,6 +354,10 @@ import com.amazonaws.services.ec2.model.DescribeInternetGatewaysRequest; import com.amazonaws.services.ec2.model.DescribeInternetGatewaysResult; import com.amazonaws.services.ec2.model.DescribeKeyPairsRequest; import com.amazonaws.services.ec2.model.DescribeKeyPairsResult; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsRequest; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplateVersionsResult; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesRequest; +import com.amazonaws.services.ec2.model.DescribeLaunchTemplatesResult; import com.amazonaws.services.ec2.model.DescribeMovingAddressesRequest; import com.amazonaws.services.ec2.model.DescribeMovingAddressesResult; import com.amazonaws.services.ec2.model.DescribeNatGatewaysRequest; @@ -267,14 +366,18 @@ import com.amazonaws.services.ec2.model.DescribeNetworkAclsRequest; import com.amazonaws.services.ec2.model.DescribeNetworkAclsResult; import com.amazonaws.services.ec2.model.DescribeNetworkInterfaceAttributeRequest; import com.amazonaws.services.ec2.model.DescribeNetworkInterfaceAttributeResult; -import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesRequest; -import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesResult; import com.amazonaws.services.ec2.model.DescribeNetworkInterfacePermissionsRequest; import com.amazonaws.services.ec2.model.DescribeNetworkInterfacePermissionsResult; +import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesRequest; +import com.amazonaws.services.ec2.model.DescribeNetworkInterfacesResult; import com.amazonaws.services.ec2.model.DescribePlacementGroupsRequest; import com.amazonaws.services.ec2.model.DescribePlacementGroupsResult; import com.amazonaws.services.ec2.model.DescribePrefixListsRequest; import com.amazonaws.services.ec2.model.DescribePrefixListsResult; +import com.amazonaws.services.ec2.model.DescribePrincipalIdFormatRequest; +import com.amazonaws.services.ec2.model.DescribePrincipalIdFormatResult; +import com.amazonaws.services.ec2.model.DescribePublicIpv4PoolsRequest; +import com.amazonaws.services.ec2.model.DescribePublicIpv4PoolsResult; import com.amazonaws.services.ec2.model.DescribeRegionsRequest; import com.amazonaws.services.ec2.model.DescribeRegionsResult; import com.amazonaws.services.ec2.model.DescribeReservedInstancesListingsRequest; @@ -291,12 +394,10 @@ import com.amazonaws.services.ec2.model.DescribeScheduledInstanceAvailabilityReq import com.amazonaws.services.ec2.model.DescribeScheduledInstanceAvailabilityResult; import com.amazonaws.services.ec2.model.DescribeScheduledInstancesRequest; import com.amazonaws.services.ec2.model.DescribeScheduledInstancesResult; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; -import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; -import 
com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsRequest; -import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsResult; import com.amazonaws.services.ec2.model.DescribeSecurityGroupReferencesRequest; import com.amazonaws.services.ec2.model.DescribeSecurityGroupReferencesResult; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; +import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; import com.amazonaws.services.ec2.model.DescribeSnapshotAttributeRequest; import com.amazonaws.services.ec2.model.DescribeSnapshotAttributeResult; import com.amazonaws.services.ec2.model.DescribeSnapshotsRequest; @@ -313,10 +414,20 @@ import com.amazonaws.services.ec2.model.DescribeSpotInstanceRequestsRequest; import com.amazonaws.services.ec2.model.DescribeSpotInstanceRequestsResult; import com.amazonaws.services.ec2.model.DescribeSpotPriceHistoryRequest; import com.amazonaws.services.ec2.model.DescribeSpotPriceHistoryResult; +import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsRequest; +import com.amazonaws.services.ec2.model.DescribeStaleSecurityGroupsResult; import com.amazonaws.services.ec2.model.DescribeSubnetsRequest; import com.amazonaws.services.ec2.model.DescribeSubnetsResult; import com.amazonaws.services.ec2.model.DescribeTagsRequest; import com.amazonaws.services.ec2.model.DescribeTagsResult; +import com.amazonaws.services.ec2.model.DescribeTransitGatewayAttachmentsRequest; +import com.amazonaws.services.ec2.model.DescribeTransitGatewayAttachmentsResult; +import com.amazonaws.services.ec2.model.DescribeTransitGatewayRouteTablesRequest; +import com.amazonaws.services.ec2.model.DescribeTransitGatewayRouteTablesResult; +import com.amazonaws.services.ec2.model.DescribeTransitGatewayVpcAttachmentsRequest; +import com.amazonaws.services.ec2.model.DescribeTransitGatewayVpcAttachmentsResult; +import com.amazonaws.services.ec2.model.DescribeTransitGatewaysRequest; +import com.amazonaws.services.ec2.model.DescribeTransitGatewaysResult; import com.amazonaws.services.ec2.model.DescribeVolumeAttributeRequest; import com.amazonaws.services.ec2.model.DescribeVolumeAttributeResult; import com.amazonaws.services.ec2.model.DescribeVolumeStatusRequest; @@ -331,6 +442,14 @@ import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkDnsSupportRequest; import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkDnsSupportResult; import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkRequest; import com.amazonaws.services.ec2.model.DescribeVpcClassicLinkResult; +import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionNotificationsRequest; +import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionNotificationsResult; +import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionsRequest; +import com.amazonaws.services.ec2.model.DescribeVpcEndpointConnectionsResult; +import com.amazonaws.services.ec2.model.DescribeVpcEndpointServiceConfigurationsRequest; +import com.amazonaws.services.ec2.model.DescribeVpcEndpointServiceConfigurationsResult; +import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicePermissionsRequest; +import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicePermissionsResult; import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicesRequest; import com.amazonaws.services.ec2.model.DescribeVpcEndpointServicesResult; import com.amazonaws.services.ec2.model.DescribeVpcEndpointsRequest; @@ -353,6 +472,8 @@ import 
com.amazonaws.services.ec2.model.DetachVolumeRequest; import com.amazonaws.services.ec2.model.DetachVolumeResult; import com.amazonaws.services.ec2.model.DetachVpnGatewayRequest; import com.amazonaws.services.ec2.model.DetachVpnGatewayResult; +import com.amazonaws.services.ec2.model.DisableTransitGatewayRouteTablePropagationRequest; +import com.amazonaws.services.ec2.model.DisableTransitGatewayRouteTablePropagationResult; import com.amazonaws.services.ec2.model.DisableVgwRoutePropagationRequest; import com.amazonaws.services.ec2.model.DisableVgwRoutePropagationResult; import com.amazonaws.services.ec2.model.DisableVpcClassicLinkDnsSupportRequest; @@ -361,16 +482,22 @@ import com.amazonaws.services.ec2.model.DisableVpcClassicLinkRequest; import com.amazonaws.services.ec2.model.DisableVpcClassicLinkResult; import com.amazonaws.services.ec2.model.DisassociateAddressRequest; import com.amazonaws.services.ec2.model.DisassociateAddressResult; -import com.amazonaws.services.ec2.model.DisassociateRouteTableRequest; -import com.amazonaws.services.ec2.model.DisassociateRouteTableResult; +import com.amazonaws.services.ec2.model.DisassociateClientVpnTargetNetworkRequest; +import com.amazonaws.services.ec2.model.DisassociateClientVpnTargetNetworkResult; import com.amazonaws.services.ec2.model.DisassociateIamInstanceProfileRequest; import com.amazonaws.services.ec2.model.DisassociateIamInstanceProfileResult; -import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockRequest; -import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockResult; +import com.amazonaws.services.ec2.model.DisassociateRouteTableRequest; +import com.amazonaws.services.ec2.model.DisassociateRouteTableResult; import com.amazonaws.services.ec2.model.DisassociateSubnetCidrBlockRequest; import com.amazonaws.services.ec2.model.DisassociateSubnetCidrBlockResult; +import com.amazonaws.services.ec2.model.DisassociateTransitGatewayRouteTableRequest; +import com.amazonaws.services.ec2.model.DisassociateTransitGatewayRouteTableResult; +import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockRequest; +import com.amazonaws.services.ec2.model.DisassociateVpcCidrBlockResult; import com.amazonaws.services.ec2.model.DryRunResult; import com.amazonaws.services.ec2.model.DryRunSupportedRequest; +import com.amazonaws.services.ec2.model.EnableTransitGatewayRouteTablePropagationRequest; +import com.amazonaws.services.ec2.model.EnableTransitGatewayRouteTablePropagationResult; import com.amazonaws.services.ec2.model.EnableVgwRoutePropagationRequest; import com.amazonaws.services.ec2.model.EnableVgwRoutePropagationResult; import com.amazonaws.services.ec2.model.EnableVolumeIORequest; @@ -379,6 +506,12 @@ import com.amazonaws.services.ec2.model.EnableVpcClassicLinkDnsSupportRequest; import com.amazonaws.services.ec2.model.EnableVpcClassicLinkDnsSupportResult; import com.amazonaws.services.ec2.model.EnableVpcClassicLinkRequest; import com.amazonaws.services.ec2.model.EnableVpcClassicLinkResult; +import com.amazonaws.services.ec2.model.ExportClientVpnClientCertificateRevocationListRequest; +import com.amazonaws.services.ec2.model.ExportClientVpnClientCertificateRevocationListResult; +import com.amazonaws.services.ec2.model.ExportClientVpnClientConfigurationRequest; +import com.amazonaws.services.ec2.model.ExportClientVpnClientConfigurationResult; +import com.amazonaws.services.ec2.model.ExportTransitGatewayRoutesRequest; +import com.amazonaws.services.ec2.model.ExportTransitGatewayRoutesResult; import 
com.amazonaws.services.ec2.model.Filter; import com.amazonaws.services.ec2.model.GetConsoleOutputRequest; import com.amazonaws.services.ec2.model.GetConsoleOutputResult; @@ -386,10 +519,20 @@ import com.amazonaws.services.ec2.model.GetConsoleScreenshotRequest; import com.amazonaws.services.ec2.model.GetConsoleScreenshotResult; import com.amazonaws.services.ec2.model.GetHostReservationPurchasePreviewRequest; import com.amazonaws.services.ec2.model.GetHostReservationPurchasePreviewResult; +import com.amazonaws.services.ec2.model.GetLaunchTemplateDataRequest; +import com.amazonaws.services.ec2.model.GetLaunchTemplateDataResult; import com.amazonaws.services.ec2.model.GetPasswordDataRequest; import com.amazonaws.services.ec2.model.GetPasswordDataResult; import com.amazonaws.services.ec2.model.GetReservedInstancesExchangeQuoteRequest; import com.amazonaws.services.ec2.model.GetReservedInstancesExchangeQuoteResult; +import com.amazonaws.services.ec2.model.GetTransitGatewayAttachmentPropagationsRequest; +import com.amazonaws.services.ec2.model.GetTransitGatewayAttachmentPropagationsResult; +import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTableAssociationsRequest; +import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTableAssociationsResult; +import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTablePropagationsRequest; +import com.amazonaws.services.ec2.model.GetTransitGatewayRouteTablePropagationsResult; +import com.amazonaws.services.ec2.model.ImportClientVpnClientCertificateRevocationListRequest; +import com.amazonaws.services.ec2.model.ImportClientVpnClientCertificateRevocationListResult; import com.amazonaws.services.ec2.model.ImportImageRequest; import com.amazonaws.services.ec2.model.ImportImageResult; import com.amazonaws.services.ec2.model.ImportInstanceRequest; @@ -403,18 +546,32 @@ import com.amazonaws.services.ec2.model.ImportVolumeResult; import com.amazonaws.services.ec2.model.Instance; import com.amazonaws.services.ec2.model.InstanceState; import com.amazonaws.services.ec2.model.InstanceStateName; +import com.amazonaws.services.ec2.model.ModifyCapacityReservationRequest; +import com.amazonaws.services.ec2.model.ModifyCapacityReservationResult; +import com.amazonaws.services.ec2.model.ModifyClientVpnEndpointRequest; +import com.amazonaws.services.ec2.model.ModifyClientVpnEndpointResult; +import com.amazonaws.services.ec2.model.ModifyFleetRequest; +import com.amazonaws.services.ec2.model.ModifyFleetResult; +import com.amazonaws.services.ec2.model.ModifyFpgaImageAttributeRequest; +import com.amazonaws.services.ec2.model.ModifyFpgaImageAttributeResult; import com.amazonaws.services.ec2.model.ModifyHostsRequest; import com.amazonaws.services.ec2.model.ModifyHostsResult; import com.amazonaws.services.ec2.model.ModifyIdFormatRequest; import com.amazonaws.services.ec2.model.ModifyIdFormatResult; +import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatRequest; +import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatResult; import com.amazonaws.services.ec2.model.ModifyImageAttributeRequest; import com.amazonaws.services.ec2.model.ModifyImageAttributeResult; import com.amazonaws.services.ec2.model.ModifyInstanceAttributeRequest; import com.amazonaws.services.ec2.model.ModifyInstanceAttributeResult; +import com.amazonaws.services.ec2.model.ModifyInstanceCapacityReservationAttributesRequest; +import com.amazonaws.services.ec2.model.ModifyInstanceCapacityReservationAttributesResult; +import 
com.amazonaws.services.ec2.model.ModifyInstanceCreditSpecificationRequest; +import com.amazonaws.services.ec2.model.ModifyInstanceCreditSpecificationResult; import com.amazonaws.services.ec2.model.ModifyInstancePlacementRequest; import com.amazonaws.services.ec2.model.ModifyInstancePlacementResult; -import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatRequest; -import com.amazonaws.services.ec2.model.ModifyIdentityIdFormatResult; +import com.amazonaws.services.ec2.model.ModifyLaunchTemplateRequest; +import com.amazonaws.services.ec2.model.ModifyLaunchTemplateResult; import com.amazonaws.services.ec2.model.ModifyNetworkInterfaceAttributeRequest; import com.amazonaws.services.ec2.model.ModifyNetworkInterfaceAttributeResult; import com.amazonaws.services.ec2.model.ModifyReservedInstancesRequest; @@ -425,32 +582,48 @@ import com.amazonaws.services.ec2.model.ModifySpotFleetRequestRequest; import com.amazonaws.services.ec2.model.ModifySpotFleetRequestResult; import com.amazonaws.services.ec2.model.ModifySubnetAttributeRequest; import com.amazonaws.services.ec2.model.ModifySubnetAttributeResult; +import com.amazonaws.services.ec2.model.ModifyTransitGatewayVpcAttachmentRequest; +import com.amazonaws.services.ec2.model.ModifyTransitGatewayVpcAttachmentResult; import com.amazonaws.services.ec2.model.ModifyVolumeAttributeRequest; import com.amazonaws.services.ec2.model.ModifyVolumeAttributeResult; import com.amazonaws.services.ec2.model.ModifyVolumeRequest; import com.amazonaws.services.ec2.model.ModifyVolumeResult; import com.amazonaws.services.ec2.model.ModifyVpcAttributeRequest; import com.amazonaws.services.ec2.model.ModifyVpcAttributeResult; +import com.amazonaws.services.ec2.model.ModifyVpcEndpointConnectionNotificationRequest; +import com.amazonaws.services.ec2.model.ModifyVpcEndpointConnectionNotificationResult; import com.amazonaws.services.ec2.model.ModifyVpcEndpointRequest; import com.amazonaws.services.ec2.model.ModifyVpcEndpointResult; +import com.amazonaws.services.ec2.model.ModifyVpcEndpointServiceConfigurationRequest; +import com.amazonaws.services.ec2.model.ModifyVpcEndpointServiceConfigurationResult; +import com.amazonaws.services.ec2.model.ModifyVpcEndpointServicePermissionsRequest; +import com.amazonaws.services.ec2.model.ModifyVpcEndpointServicePermissionsResult; +import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsRequest; +import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsResult; +import com.amazonaws.services.ec2.model.ModifyVpcTenancyRequest; +import com.amazonaws.services.ec2.model.ModifyVpcTenancyResult; import com.amazonaws.services.ec2.model.MonitorInstancesRequest; import com.amazonaws.services.ec2.model.MonitorInstancesResult; import com.amazonaws.services.ec2.model.MoveAddressToVpcRequest; import com.amazonaws.services.ec2.model.MoveAddressToVpcResult; +import com.amazonaws.services.ec2.model.ProvisionByoipCidrRequest; +import com.amazonaws.services.ec2.model.ProvisionByoipCidrResult; +import com.amazonaws.services.ec2.model.PurchaseHostReservationRequest; +import com.amazonaws.services.ec2.model.PurchaseHostReservationResult; import com.amazonaws.services.ec2.model.PurchaseReservedInstancesOfferingRequest; import com.amazonaws.services.ec2.model.PurchaseReservedInstancesOfferingResult; import com.amazonaws.services.ec2.model.PurchaseScheduledInstancesRequest; import com.amazonaws.services.ec2.model.PurchaseScheduledInstancesResult; -import com.amazonaws.services.ec2.model.PurchaseHostReservationRequest; -import 
com.amazonaws.services.ec2.model.PurchaseHostReservationResult; import com.amazonaws.services.ec2.model.RebootInstancesRequest; import com.amazonaws.services.ec2.model.RebootInstancesResult; import com.amazonaws.services.ec2.model.RegisterImageRequest; import com.amazonaws.services.ec2.model.RegisterImageResult; +import com.amazonaws.services.ec2.model.RejectTransitGatewayVpcAttachmentRequest; +import com.amazonaws.services.ec2.model.RejectTransitGatewayVpcAttachmentResult; +import com.amazonaws.services.ec2.model.RejectVpcEndpointConnectionsRequest; +import com.amazonaws.services.ec2.model.RejectVpcEndpointConnectionsResult; import com.amazonaws.services.ec2.model.RejectVpcPeeringConnectionRequest; import com.amazonaws.services.ec2.model.RejectVpcPeeringConnectionResult; -import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsRequest; -import com.amazonaws.services.ec2.model.ModifyVpcPeeringConnectionOptionsResult; import com.amazonaws.services.ec2.model.ReleaseAddressRequest; import com.amazonaws.services.ec2.model.ReleaseAddressResult; import com.amazonaws.services.ec2.model.ReleaseHostsRequest; @@ -465,6 +638,8 @@ import com.amazonaws.services.ec2.model.ReplaceRouteRequest; import com.amazonaws.services.ec2.model.ReplaceRouteResult; import com.amazonaws.services.ec2.model.ReplaceRouteTableAssociationRequest; import com.amazonaws.services.ec2.model.ReplaceRouteTableAssociationResult; +import com.amazonaws.services.ec2.model.ReplaceTransitGatewayRouteRequest; +import com.amazonaws.services.ec2.model.ReplaceTransitGatewayRouteResult; import com.amazonaws.services.ec2.model.ReportInstanceStatusRequest; import com.amazonaws.services.ec2.model.ReportInstanceStatusResult; import com.amazonaws.services.ec2.model.RequestSpotFleetRequest; @@ -472,6 +647,8 @@ import com.amazonaws.services.ec2.model.RequestSpotFleetResult; import com.amazonaws.services.ec2.model.RequestSpotInstancesRequest; import com.amazonaws.services.ec2.model.RequestSpotInstancesResult; import com.amazonaws.services.ec2.model.Reservation; +import com.amazonaws.services.ec2.model.ResetFpgaImageAttributeRequest; +import com.amazonaws.services.ec2.model.ResetFpgaImageAttributeResult; import com.amazonaws.services.ec2.model.ResetImageAttributeRequest; import com.amazonaws.services.ec2.model.ResetImageAttributeResult; import com.amazonaws.services.ec2.model.ResetInstanceAttributeRequest; @@ -482,6 +659,8 @@ import com.amazonaws.services.ec2.model.ResetSnapshotAttributeRequest; import com.amazonaws.services.ec2.model.ResetSnapshotAttributeResult; import com.amazonaws.services.ec2.model.RestoreAddressToClassicRequest; import com.amazonaws.services.ec2.model.RestoreAddressToClassicResult; +import com.amazonaws.services.ec2.model.RevokeClientVpnIngressRequest; +import com.amazonaws.services.ec2.model.RevokeClientVpnIngressResult; import com.amazonaws.services.ec2.model.RevokeSecurityGroupEgressRequest; import com.amazonaws.services.ec2.model.RevokeSecurityGroupEgressResult; import com.amazonaws.services.ec2.model.RevokeSecurityGroupIngressRequest; @@ -490,11 +669,15 @@ import com.amazonaws.services.ec2.model.RunInstancesRequest; import com.amazonaws.services.ec2.model.RunInstancesResult; import com.amazonaws.services.ec2.model.RunScheduledInstancesRequest; import com.amazonaws.services.ec2.model.RunScheduledInstancesResult; +import com.amazonaws.services.ec2.model.SearchTransitGatewayRoutesRequest; +import com.amazonaws.services.ec2.model.SearchTransitGatewayRoutesResult; import 
com.amazonaws.services.ec2.model.StartInstancesRequest; import com.amazonaws.services.ec2.model.StartInstancesResult; import com.amazonaws.services.ec2.model.StopInstancesRequest; import com.amazonaws.services.ec2.model.StopInstancesResult; import com.amazonaws.services.ec2.model.Tag; +import com.amazonaws.services.ec2.model.TerminateClientVpnConnectionsRequest; +import com.amazonaws.services.ec2.model.TerminateClientVpnConnectionsResult; import com.amazonaws.services.ec2.model.TerminateInstancesRequest; import com.amazonaws.services.ec2.model.TerminateInstancesResult; import com.amazonaws.services.ec2.model.UnassignIpv6AddressesRequest; @@ -507,9 +690,11 @@ import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsEgres import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsEgressResult; import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsIngressRequest; import com.amazonaws.services.ec2.model.UpdateSecurityGroupRuleDescriptionsIngressResult; +import com.amazonaws.services.ec2.model.WithdrawByoipCidrRequest; +import com.amazonaws.services.ec2.model.WithdrawByoipCidrResult; import com.amazonaws.services.ec2.waiters.AmazonEC2Waiters; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import java.util.ArrayList; import java.util.Collection; @@ -563,7 +748,7 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public DescribeInstancesResult describeInstances(DescribeInstancesRequest describeInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { Collection<Instance> filteredInstances = new ArrayList<>(); logger.debug("--> mocking describeInstances"); @@ -660,49 +845,77 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public AcceptReservedInstancesExchangeQuoteResult acceptReservedInstancesExchangeQuote( AcceptReservedInstancesExchangeQuoteRequest acceptReservedInstancesExchangeQuoteRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AcceptTransitGatewayVpcAttachmentResult acceptTransitGatewayVpcAttachment( + AcceptTransitGatewayVpcAttachmentRequest acceptTransitGatewayVpcAttachmentRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AcceptVpcEndpointConnectionsResult acceptVpcEndpointConnections( + AcceptVpcEndpointConnectionsRequest acceptVpcEndpointConnectionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RebootInstancesResult rebootInstances(RebootInstancesRequest rebootInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesResult describeReservedInstances( DescribeReservedInstancesRequest describeReservedInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateFlowLogsResult createFlowLogs(CreateFlowLogsRequest createFlowLogsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeAvailabilityZonesResult 
describeAvailabilityZones(DescribeAvailabilityZonesRequest describeAvailabilityZonesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RestoreAddressToClassicResult restoreAddressToClassic(RestoreAddressToClassicRequest restoreAddressToClassicRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DetachVolumeResult detachVolume(DetachVolumeRequest detachVolumeRequest) throws AmazonServiceException, AmazonClientException { + public RevokeClientVpnIngressResult revokeClientVpnIngress(RevokeClientVpnIngressRequest revokeClientVpnIngressRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DetachVolumeResult detachVolume(DetachVolumeRequest detachVolumeRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteKeyPairResult deleteKeyPair(DeleteKeyPairRequest deleteKeyPairRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteLaunchTemplateResult deleteLaunchTemplate(DeleteLaunchTemplateRequest deleteLaunchTemplateRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteLaunchTemplateVersionsResult deleteLaunchTemplateVersions( + DeleteLaunchTemplateVersionsRequest deleteLaunchTemplateVersionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -713,171 +926,231 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public UnmonitorInstancesResult unmonitorInstances(UnmonitorInstancesRequest unmonitorInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public UpdateSecurityGroupRuleDescriptionsIngressResult updateSecurityGroupRuleDescriptionsIngress( UpdateSecurityGroupRuleDescriptionsIngressRequest updateSecurityGroupRuleDescriptionsIngressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public WithdrawByoipCidrResult withdrawByoipCidr(WithdrawByoipCidrRequest withdrawByoipCidrRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public UpdateSecurityGroupRuleDescriptionsEgressResult updateSecurityGroupRuleDescriptionsEgress( UpdateSecurityGroupRuleDescriptionsEgressRequest updateSecurityGroupRuleDescriptionsEgressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AttachVpnGatewayResult attachVpnGateway(AttachVpnGatewayRequest attachVpnGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateImageResult createImage(CreateImageRequest createImageRequest) throws AmazonServiceException, AmazonClientException { + public AuthorizeClientVpnIngressResult authorizeClientVpnIngress(AuthorizeClientVpnIngressRequest 
authorizeClientVpnIngressRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateImageResult createImage(CreateImageRequest createImageRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteSecurityGroupResult deleteSecurityGroup(DeleteSecurityGroupRequest deleteSecurityGroupRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateInstanceExportTaskResult createInstanceExportTask(CreateInstanceExportTaskRequest createInstanceExportTaskRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AuthorizeSecurityGroupEgressResult authorizeSecurityGroupEgress( - AuthorizeSecurityGroupEgressRequest authorizeSecurityGroupEgressRequest) throws AmazonServiceException, AmazonClientException { + AuthorizeSecurityGroupEgressRequest authorizeSecurityGroupEgressRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateDhcpOptionsResult associateDhcpOptions(AssociateDhcpOptionsRequest associateDhcpOptionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public GetPasswordDataResult getPasswordData(GetPasswordDataRequest getPasswordDataRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public GetReservedInstancesExchangeQuoteResult getReservedInstancesExchangeQuote( GetReservedInstancesExchangeQuoteRequest getReservedInstancesExchangeQuoteRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public GetTransitGatewayAttachmentPropagationsResult getTransitGatewayAttachmentPropagations( + GetTransitGatewayAttachmentPropagationsRequest getTransitGatewayAttachmentPropagationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public GetTransitGatewayRouteTableAssociationsResult getTransitGatewayRouteTableAssociations( + GetTransitGatewayRouteTableAssociationsRequest getTransitGatewayRouteTableAssociationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public GetTransitGatewayRouteTablePropagationsResult getTransitGatewayRouteTablePropagations( + GetTransitGatewayRouteTablePropagationsRequest getTransitGatewayRouteTablePropagationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ImportClientVpnClientCertificateRevocationListResult importClientVpnClientCertificateRevocationList( + ImportClientVpnClientCertificateRevocationListRequest importClientVpnClientCertificateRevocationListRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public StopInstancesResult stopInstances(StopInstancesRequest stopInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + 
public TerminateClientVpnConnectionsResult terminateClientVpnConnections( + TerminateClientVpnConnectionsRequest terminateClientVpnConnectionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ImportKeyPairResult importKeyPair(ImportKeyPairRequest importKeyPairRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteNetworkInterfaceResult deleteNetworkInterface(DeleteNetworkInterfaceRequest deleteNetworkInterfaceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyVpcAttributeResult modifyVpcAttribute(ModifyVpcAttributeRequest modifyVpcAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotFleetInstancesResult describeSpotFleetInstances(DescribeSpotFleetInstancesRequest describeSpotFleetInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateSecurityGroupResult createSecurityGroup(CreateSecurityGroupRequest createSecurityGroupRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotPriceHistoryResult describeSpotPriceHistory(DescribeSpotPriceHistoryRequest describeSpotPriceHistoryRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeNetworkInterfacesResult describeNetworkInterfaces(DescribeNetworkInterfacesRequest describeNetworkInterfacesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeNetworkInterfacePermissionsResult describeNetworkInterfacePermissions( DescribeNetworkInterfacePermissionsRequest describeNetworkInterfacePermissionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeRegionsResult describeRegions(DescribeRegionsRequest describeRegionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateDhcpOptionsResult createDhcpOptions(CreateDhcpOptionsRequest createDhcpOptionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateReservedInstancesListingResult createReservedInstancesListing( CreateReservedInstancesListingRequest createReservedInstancesListingRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteVpcEndpointsResult deleteVpcEndpoints(DeleteVpcEndpointsRequest deleteVpcEndpointsRequest) - throws AmazonServiceException, 
AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ResetSnapshotAttributeResult resetSnapshotAttribute(ResetSnapshotAttributeRequest resetSnapshotAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteRouteResult deleteRoute(DeleteRouteRequest deleteRouteRequest) throws AmazonServiceException, AmazonClientException { + public DeleteRouteResult deleteRoute(DeleteRouteRequest deleteRouteRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeInternetGatewaysResult describeInternetGateways(DescribeInternetGatewaysRequest describeInternetGatewaysRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ImportVolumeResult importVolume(ImportVolumeRequest importVolumeRequest) throws AmazonServiceException, AmazonClientException { + public ImportVolumeResult importVolume(ImportVolumeRequest importVolumeRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyCapacityReservationResult modifyCapacityReservation(ModifyCapacityReservationRequest modifyCapacityReservationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyClientVpnEndpointResult modifyClientVpnEndpoint(ModifyClientVpnEndpointRequest modifyClientVpnEndpointRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyFleetResult modifyFleet(ModifyFleetRequest modifyFleetRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyFpgaImageAttributeResult modifyFpgaImageAttribute(ModifyFpgaImageAttributeRequest modifyFpgaImageAttributeRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -893,514 +1166,599 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public DescribeSecurityGroupsResult describeSecurityGroups(DescribeSecurityGroupsRequest describeSecurityGroupsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeStaleSecurityGroupsResult describeStaleSecurityGroups( DescribeStaleSecurityGroupsRequest describeStaleSecurityGroupsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSecurityGroupReferencesResult describeSecurityGroupReferences( DescribeSecurityGroupReferencesRequest describeSecurityGroupReferencesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RejectVpcPeeringConnectionResult rejectVpcPeeringConnection( RejectVpcPeeringConnectionRequest rejectVpcPeeringConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyVpcPeeringConnectionOptionsResult 
modifyVpcPeeringConnectionOptions( ModifyVpcPeeringConnectionOptionsRequest modifyVpcPeeringConnectionOptionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyVpcTenancyResult modifyVpcTenancy(ModifyVpcTenancyRequest modifyVpcTenancyRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteFlowLogsResult deleteFlowLogs(DeleteFlowLogsRequest deleteFlowLogsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteFpgaImageResult deleteFpgaImage(DeleteFpgaImageRequest deleteFpgaImageRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DetachVpnGatewayResult detachVpnGateway(DetachVpnGatewayRequest detachVpnGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisableTransitGatewayRouteTablePropagationResult disableTransitGatewayRouteTablePropagation( + DisableTransitGatewayRouteTablePropagationRequest disableTransitGatewayRouteTablePropagationRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeregisterImageResult deregisterImage(DeregisterImageRequest deregisterImageRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotDatafeedSubscriptionResult describeSpotDatafeedSubscription( DescribeSpotDatafeedSubscriptionRequest describeSpotDatafeedSubscriptionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteTagsResult deleteTags(DeleteTagsRequest deleteTagsRequest) throws AmazonServiceException, AmazonClientException { + public DeleteTagsResult deleteTags(DeleteTagsRequest deleteTagsRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteTransitGatewayResult deleteTransitGateway(DeleteTransitGatewayRequest deleteTransitGatewayRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteTransitGatewayRouteResult deleteTransitGatewayRoute(DeleteTransitGatewayRouteRequest deleteTransitGatewayRouteRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteTransitGatewayRouteTableResult deleteTransitGatewayRouteTable( + DeleteTransitGatewayRouteTableRequest deleteTransitGatewayRouteTableRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteTransitGatewayVpcAttachmentResult deleteTransitGatewayVpcAttachment( + DeleteTransitGatewayVpcAttachmentRequest deleteTransitGatewayVpcAttachmentRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteSubnetResult deleteSubnet(DeleteSubnetRequest deleteSubnetRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public 
DescribeAccountAttributesResult describeAccountAttributes(DescribeAccountAttributesRequest describeAccountAttributesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AttachClassicLinkVpcResult attachClassicLinkVpc(AttachClassicLinkVpcRequest attachClassicLinkVpcRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateVpnGatewayResult createVpnGateway(CreateVpnGatewayRequest createVpnGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteClientVpnEndpointResult deleteClientVpnEndpoint(DeleteClientVpnEndpointRequest deleteClientVpnEndpointRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteClientVpnRouteResult deleteClientVpnRoute(DeleteClientVpnRouteRequest deleteClientVpnRouteRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public EnableVolumeIOResult enableVolumeIO(EnableVolumeIORequest enableVolumeIORequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public MoveAddressToVpcResult moveAddressToVpc(MoveAddressToVpcRequest moveAddressToVpcRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ProvisionByoipCidrResult provisionByoipCidr(ProvisionByoipCidrRequest provisionByoipCidrRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteVpnGatewayResult deleteVpnGateway(DeleteVpnGatewayRequest deleteVpnGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public AttachVolumeResult attachVolume(AttachVolumeRequest attachVolumeRequest) throws AmazonServiceException, AmazonClientException { + public DeprovisionByoipCidrResult deprovisionByoipCidr(DeprovisionByoipCidrRequest deprovisionByoipCidrRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AttachVolumeResult attachVolume(AttachVolumeRequest attachVolumeRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVolumeStatusResult describeVolumeStatus(DescribeVolumeStatusRequest describeVolumeStatusRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVolumesModificationsResult describeVolumesModifications( DescribeVolumesModificationsRequest describeVolumesModificationsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeImportSnapshotTasksResult describeImportSnapshotTasks( DescribeImportSnapshotTasksRequest describeImportSnapshotTasksRequest) - throws AmazonServiceException, AmazonClientException { + 
throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpnConnectionsResult describeVpnConnections(DescribeVpnConnectionsRequest describeVpnConnectionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ResetImageAttributeResult resetImageAttribute(ResetImageAttributeRequest resetImageAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public EnableVgwRoutePropagationResult enableVgwRoutePropagation(EnableVgwRoutePropagationRequest enableVgwRoutePropagationRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateSnapshotResult createSnapshot(CreateSnapshotRequest createSnapshotRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteVolumeResult deleteVolume(DeleteVolumeRequest deleteVolumeRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVolumeResult deleteVolume(DeleteVolumeRequest deleteVolumeRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateNetworkInterfaceResult createNetworkInterface(CreateNetworkInterfaceRequest createNetworkInterfaceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyReservedInstancesResult modifyReservedInstances(ModifyReservedInstancesRequest modifyReservedInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelSpotFleetRequestsResult cancelSpotFleetRequests(CancelSpotFleetRequestsRequest cancelSpotFleetRequestsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public UnassignPrivateIpAddressesResult unassignPrivateIpAddresses(UnassignPrivateIpAddressesRequest unassignPrivateIpAddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public UnassignIpv6AddressesResult unassignIpv6Addresses(UnassignIpv6AddressesRequest unassignIpv6AddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpcsResult describeVpcs(DescribeVpcsRequest describeVpcsRequest) throws AmazonServiceException, AmazonClientException { + public DescribeVpcsResult describeVpcs(DescribeVpcsRequest describeVpcsRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelConversionTaskResult cancelConversionTask(CancelConversionTaskRequest cancelConversionTaskRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw 
new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateAddressResult associateAddress(AssociateAddressRequest associateAddressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AssociateClientVpnTargetNetworkResult associateClientVpnTargetNetwork( + AssociateClientVpnTargetNetworkRequest associateClientVpnTargetNetworkRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateIamInstanceProfileResult associateIamInstanceProfile(AssociateIamInstanceProfileRequest associateIamInstanceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateVpcCidrBlockResult associateVpcCidrBlock(AssociateVpcCidrBlockRequest associateVpcCidrBlockRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateSubnetCidrBlockResult associateSubnetCidrBlock(AssociateSubnetCidrBlockRequest associateSubnetCidrBlockRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public AssociateTransitGatewayRouteTableResult associateTransitGatewayRouteTable( + AssociateTransitGatewayRouteTableRequest associateTransitGatewayRouteTableRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteCustomerGatewayResult deleteCustomerGateway(DeleteCustomerGatewayRequest deleteCustomerGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateNetworkAclEntryResult createNetworkAclEntry(CreateNetworkAclEntryRequest createNetworkAclEntryRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AcceptVpcPeeringConnectionResult acceptVpcPeeringConnection(AcceptVpcPeeringConnectionRequest acceptVpcPeeringConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeExportTasksResult describeExportTasks(DescribeExportTasksRequest describeExportTasksRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeElasticGpusResult describeElasticGpus(DescribeElasticGpusRequest describeElasticGpusRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeFpgaImagesResult describeFpgaImages(DescribeFpgaImagesRequest describeFpgaImagesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeHostReservationOfferingsResult describeHostReservationOfferings( DescribeHostReservationOfferingsRequest 
describeHostReservationOfferingsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeHostReservationsResult describeHostReservations(DescribeHostReservationsRequest describeHostReservationsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeIdentityIdFormatResult describeIdentityIdFormat(DescribeIdentityIdFormatRequest describeIdentityIdFormatRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DetachInternetGatewayResult detachInternetGateway(DetachInternetGatewayRequest detachInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateVpcPeeringConnectionResult createVpcPeeringConnection(CreateVpcPeeringConnectionRequest createVpcPeeringConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateRouteTableResult createRouteTable(CreateRouteTableRequest createRouteTableRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelImportTaskResult cancelImportTask(CancelImportTaskRequest cancelImportTaskRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVolumesResult describeVolumes(DescribeVolumesRequest describeVolumesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesListingsResult describeReservedInstancesListings( DescribeReservedInstancesListingsRequest describeReservedInstancesListingsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ReportInstanceStatusResult reportInstanceStatus(ReportInstanceStatusRequest reportInstanceStatusRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeRouteTablesResult describeRouteTables(DescribeRouteTablesRequest describeRouteTablesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeDhcpOptionsResult describeDhcpOptions(DescribeDhcpOptionsRequest describeDhcpOptionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeEgressOnlyInternetGatewaysResult describeEgressOnlyInternetGateways( DescribeEgressOnlyInternetGatewaysRequest describeEgressOnlyInternetGatewaysRequest) - throws AmazonServiceException, 
AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public MonitorInstancesResult monitorInstances(MonitorInstancesRequest monitorInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribePrefixListsResult describePrefixLists(DescribePrefixListsRequest describePrefixListsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RequestSpotFleetResult requestSpotFleet(RequestSpotFleetRequest requestSpotFleetRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeImportImageTasksResult describeImportImageTasks(DescribeImportImageTasksRequest describeImportImageTasksRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeNetworkAclsResult describeNetworkAcls(DescribeNetworkAclsRequest describeNetworkAclsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeBundleTasksResult describeBundleTasks(DescribeBundleTasksRequest describeBundleTasksRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ImportInstanceResult importInstance(ImportInstanceRequest importInstanceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteVpcPeeringConnectionResult deleteVpcPeeringConnection(DeleteVpcPeeringConnectionRequest deleteVpcPeeringConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public GetConsoleOutputResult getConsoleOutput(GetConsoleOutputRequest getConsoleOutputRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public GetConsoleScreenshotResult getConsoleScreenshot(GetConsoleScreenshotRequest getConsoleScreenshotRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public GetHostReservationPurchasePreviewResult getHostReservationPurchasePreview( GetHostReservationPurchasePreviewRequest getHostReservationPurchasePreviewRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public GetLaunchTemplateDataResult getLaunchTemplateData(GetLaunchTemplateDataRequest getLaunchTemplateDataRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateInternetGatewayResult createInternetGateway(CreateInternetGatewayRequest createInternetGatewayRequest) - throws 
AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteVpnConnectionRouteResult deleteVpnConnectionRoute(DeleteVpnConnectionRouteRequest deleteVpnConnectionRouteRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DetachNetworkInterfaceResult detachNetworkInterface(DetachNetworkInterfaceRequest detachNetworkInterfaceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyImageAttributeResult modifyImageAttribute(ModifyImageAttributeRequest modifyImageAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateCustomerGatewayResult createCustomerGateway(CreateCustomerGatewayRequest createCustomerGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateDefaultSubnetResult createDefaultSubnet(CreateDefaultSubnetRequest createDefaultSubnetRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateEgressOnlyInternetGatewayResult createEgressOnlyInternetGateway( CreateEgressOnlyInternetGatewayRequest createEgressOnlyInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateFleetResult createFleet(CreateFleetRequest createFleetRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateFpgaImageResult createFpgaImage(CreateFpgaImageRequest createFpgaImageRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateNetworkInterfacePermissionResult createNetworkInterfacePermission( CreateNetworkInterfacePermissionRequest createNetworkInterfacePermissionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateDefaultVpcResult createDefaultVpc(CreateDefaultVpcRequest createDefaultVpcRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateSpotDatafeedSubscriptionResult createSpotDatafeedSubscription( CreateSpotDatafeedSubscriptionRequest createSpotDatafeedSubscriptionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AttachInternetGatewayResult attachInternetGateway(AttachInternetGatewayRequest attachInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteVpnConnectionResult deleteVpnConnection(DeleteVpnConnectionRequest 
deleteVpnConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeMovingAddressesResult describeMovingAddresses(DescribeMovingAddressesRequest describeMovingAddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeConversionTasksResult describeConversionTasks(DescribeConversionTasksRequest describeConversionTasksRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateVpnConnectionResult createVpnConnection(CreateVpnConnectionRequest createVpnConnectionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ImportImageResult importImage(ImportImageRequest importImageRequest) throws AmazonServiceException, AmazonClientException { + public ImportImageResult importImage(ImportImageRequest importImageRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisableVpcClassicLinkResult disableVpcClassicLink(DisableVpcClassicLinkRequest disableVpcClassicLinkRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1412,31 +1770,37 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public DescribeInstanceAttributeResult describeInstanceAttribute(DescribeInstanceAttributeRequest describeInstanceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeInstanceCreditSpecificationsResult describeInstanceCreditSpecifications( + DescribeInstanceCreditSpecificationsRequest describeInstanceCreditSpecificationsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeFlowLogsResult describeFlowLogs(DescribeFlowLogsRequest describeFlowLogsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpcPeeringConnectionsResult describeVpcPeeringConnections( DescribeVpcPeeringConnectionsRequest describeVpcPeeringConnectionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribePlacementGroupsResult describePlacementGroups(DescribePlacementGroupsRequest describePlacementGroupsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public RunInstancesResult runInstances(RunInstancesRequest runInstancesRequest) throws AmazonServiceException, AmazonClientException { + public RunInstancesResult runInstances(RunInstancesRequest runInstancesRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1445,45 +1809,63 @@ public class AmazonEC2Mock 
implements AmazonEC2 { throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public SearchTransitGatewayRoutesResult searchTransitGatewayRoutes( + SearchTransitGatewayRoutesRequest searchTransitGatewayRoutesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public DescribeSubnetsResult describeSubnets(DescribeSubnetsRequest describeSubnetsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssociateRouteTableResult associateRouteTable(AssociateRouteTableRequest associateRouteTableRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyVolumeAttributeResult modifyVolumeAttribute(ModifyVolumeAttributeRequest modifyVolumeAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteNetworkAclResult deleteNetworkAcl(DeleteNetworkAclRequest deleteNetworkAclRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeImagesResult describeImages(DescribeImagesRequest describeImagesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public StartInstancesResult startInstances(StartInstancesRequest startInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyInstanceAttributeResult modifyInstanceAttribute(ModifyInstanceAttributeRequest modifyInstanceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyInstanceCapacityReservationAttributesResult modifyInstanceCapacityReservationAttributes( + ModifyInstanceCapacityReservationAttributesRequest modifyInstanceCapacityReservationAttributesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyInstanceCreditSpecificationResult modifyInstanceCreditSpecification( + ModifyInstanceCreditSpecificationRequest modifyInstanceCreditSpecificationRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1492,6 +1874,11 @@ public class AmazonEC2Mock implements AmazonEC2 { throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public ModifyLaunchTemplateResult modifyLaunchTemplate(ModifyLaunchTemplateRequest modifyLaunchTemplateRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public ModifyIdentityIdFormatResult modifyIdentityIdFormat(ModifyIdentityIdFormatRequest modifyIdentityIdFormatRequest) { throw new UnsupportedOperationException("Not supported in mock"); @@ -1500,175 +1887,224 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public CancelReservedInstancesListingResult cancelReservedInstancesListing( CancelReservedInstancesListingRequest cancelReservedInstancesListingRequest) - throws 
AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteDhcpOptionsResult deleteDhcpOptions(DeleteDhcpOptionsRequest deleteDhcpOptionsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteEgressOnlyInternetGatewayResult deleteEgressOnlyInternetGateway( DeleteEgressOnlyInternetGatewayRequest deleteEgressOnlyInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteFleetsResult deleteFleets(DeleteFleetsRequest deleteFleetsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteNetworkInterfacePermissionResult deleteNetworkInterfacePermission( DeleteNetworkInterfacePermissionRequest deleteNetworkInterfacePermissionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AuthorizeSecurityGroupIngressResult authorizeSecurityGroupIngress( AuthorizeSecurityGroupIngressRequest authorizeSecurityGroupIngressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotInstanceRequestsResult describeSpotInstanceRequests( - DescribeSpotInstanceRequestsRequest describeSpotInstanceRequestsRequest) throws AmazonServiceException, AmazonClientException { + DescribeSpotInstanceRequestsRequest describeSpotInstanceRequestsRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateVpcResult createVpc(CreateVpcRequest createVpcRequest) throws AmazonServiceException, AmazonClientException { + public CreateVpcResult createVpc(CreateVpcRequest createVpcRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeCustomerGatewaysResult describeCustomerGateways(DescribeCustomerGatewaysRequest describeCustomerGatewaysRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelExportTaskResult cancelExportTask(CancelExportTaskRequest cancelExportTaskRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateRouteResult createRoute(CreateRouteRequest createRouteRequest) throws AmazonServiceException, AmazonClientException { + public CreateRouteResult createRoute(CreateRouteRequest createRouteRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateVpcEndpointResult createVpcEndpoint(CreateVpcEndpointRequest createVpcEndpointRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CopyImageResult copyImage(CopyImageRequest copyImageRequest) throws AmazonServiceException, AmazonClientException { + public 
CreateVpcEndpointConnectionNotificationResult createVpcEndpointConnectionNotification( + CreateVpcEndpointConnectionNotificationRequest createVpcEndpointConnectionNotificationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateVpcEndpointServiceConfigurationResult createVpcEndpointServiceConfiguration( + CreateVpcEndpointServiceConfigurationRequest createVpcEndpointServiceConfigurationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CopyImageResult copyImage(CopyImageRequest copyImageRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpcClassicLinkResult describeVpcClassicLink(DescribeVpcClassicLinkRequest describeVpcClassicLinkRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyNetworkInterfaceAttributeResult modifyNetworkInterfaceAttribute( ModifyNetworkInterfaceAttributeRequest modifyNetworkInterfaceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteRouteTableResult deleteRouteTable(DeleteRouteTableRequest deleteRouteTableRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeNetworkInterfaceAttributeResult describeNetworkInterfaceAttribute( DescribeNetworkInterfaceAttributeRequest describeNetworkInterfaceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeClassicLinkInstancesResult describeClassicLinkInstances( - DescribeClassicLinkInstancesRequest describeClassicLinkInstancesRequest) throws AmazonServiceException, AmazonClientException { + DescribeClassicLinkInstancesRequest describeClassicLinkInstancesRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RequestSpotInstancesResult requestSpotInstances(RequestSpotInstancesRequest requestSpotInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateTagsResult createTags(CreateTagsRequest createTagsRequest) throws AmazonServiceException, AmazonClientException { + public ResetFpgaImageAttributeResult resetFpgaImageAttribute(ResetFpgaImageAttributeRequest resetFpgaImageAttributeRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateTagsResult createTags(CreateTagsRequest createTagsRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateTransitGatewayResult createTransitGateway(CreateTransitGatewayRequest createTransitGatewayRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateTransitGatewayRouteResult createTransitGatewayRoute(CreateTransitGatewayRouteRequest createTransitGatewayRouteRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + 
@Override + public CreateTransitGatewayRouteTableResult createTransitGatewayRouteTable( + CreateTransitGatewayRouteTableRequest createTransitGatewayRouteTableRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateTransitGatewayVpcAttachmentResult createTransitGatewayVpcAttachment( + CreateTransitGatewayVpcAttachmentRequest createTransitGatewayVpcAttachmentRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVolumeAttributeResult describeVolumeAttribute(DescribeVolumeAttributeRequest describeVolumeAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AttachNetworkInterfaceResult attachNetworkInterface(AttachNetworkInterfaceRequest attachNetworkInterfaceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ReplaceRouteResult replaceRoute(ReplaceRouteRequest replaceRouteRequest) throws AmazonServiceException, AmazonClientException { + public ReplaceRouteResult replaceRoute(ReplaceRouteRequest replaceRouteRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeTagsResult describeTags(DescribeTagsRequest describeTagsRequest) throws AmazonServiceException, AmazonClientException { + public DescribeTagsResult describeTags(DescribeTagsRequest describeTagsRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelBundleTaskResult cancelBundleTask(CancelBundleTaskRequest cancelBundleTaskRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CancelCapacityReservationResult cancelCapacityReservation(CancelCapacityReservationRequest cancelCapacityReservationRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisableVgwRoutePropagationResult disableVgwRoutePropagation(DisableVgwRoutePropagationRequest disableVgwRoutePropagationRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ImportSnapshotResult importSnapshot(ImportSnapshotRequest importSnapshotRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CancelSpotInstanceRequestsResult cancelSpotInstanceRequests(CancelSpotInstanceRequestsRequest cancelSpotInstanceRequestsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotFleetRequestsResult describeSpotFleetRequests(DescribeSpotFleetRequestsRequest describeSpotFleetRequestsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public PurchaseReservedInstancesOfferingResult purchaseReservedInstancesOffering( PurchaseReservedInstancesOfferingRequest purchaseReservedInstancesOfferingRequest) 
- throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1685,243 +2121,323 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public ModifySnapshotAttributeResult modifySnapshotAttribute(ModifySnapshotAttributeRequest modifySnapshotAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesModificationsResult describeReservedInstancesModifications( DescribeReservedInstancesModificationsRequest describeReservedInstancesModificationsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public TerminateInstancesResult terminateInstances(TerminateInstancesRequest terminateInstancesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifyVpcEndpointResult modifyVpcEndpoint(ModifyVpcEndpointRequest modifyVpcEndpointRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyVpcEndpointConnectionNotificationResult modifyVpcEndpointConnectionNotification( + ModifyVpcEndpointConnectionNotificationRequest modifyVpcEndpointConnectionNotificationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyVpcEndpointServiceConfigurationResult modifyVpcEndpointServiceConfiguration( + ModifyVpcEndpointServiceConfigurationRequest modifyVpcEndpointServiceConfigurationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ModifyVpcEndpointServicePermissionsResult modifyVpcEndpointServicePermissions( + ModifyVpcEndpointServicePermissionsRequest modifyVpcEndpointServicePermissionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription( DeleteSpotDatafeedSubscriptionRequest deleteSpotDatafeedSubscriptionRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteInternetGatewayResult deleteInternetGateway(DeleteInternetGatewayRequest deleteInternetGatewayRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSnapshotAttributeResult describeSnapshotAttribute(DescribeSnapshotAttributeRequest describeSnapshotAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ReplaceRouteTableAssociationResult replaceRouteTableAssociation( - ReplaceRouteTableAssociationRequest replaceRouteTableAssociationRequest) throws AmazonServiceException, AmazonClientException { + ReplaceRouteTableAssociationRequest replaceRouteTableAssociationRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + 
public ReplaceTransitGatewayRouteResult replaceTransitGatewayRoute( + ReplaceTransitGatewayRouteRequest replaceTransitGatewayRouteRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeAddressesResult describeAddresses(DescribeAddressesRequest describeAddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeImageAttributeResult describeImageAttribute(DescribeImageAttributeRequest describeImageAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeKeyPairsResult describeKeyPairs(DescribeKeyPairsRequest describeKeyPairsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ConfirmProductInstanceResult confirmProductInstance(ConfirmProductInstanceRequest confirmProductInstanceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CopyFpgaImageResult copyFpgaImage(CopyFpgaImageRequest copyFpgaImageRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisassociateRouteTableResult disassociateRouteTable(DisassociateRouteTableRequest disassociateRouteTableRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisassociateIamInstanceProfileResult disassociateIamInstanceProfile( DisassociateIamInstanceProfileRequest disassociateIamInstanceProfileRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisassociateVpcCidrBlockResult disassociateVpcCidrBlock(DisassociateVpcCidrBlockRequest disassociateVpcCidrBlockRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public EnableTransitGatewayRouteTablePropagationResult enableTransitGatewayRouteTablePropagation( + EnableTransitGatewayRouteTablePropagationRequest enableTransitGatewayRouteTablePropagationRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisassociateSubnetCidrBlockResult disassociateSubnetCidrBlock( - DisassociateSubnetCidrBlockRequest disassociateSubnetCidrBlockRequest) throws AmazonServiceException, AmazonClientException { + DisassociateSubnetCidrBlockRequest disassociateSubnetCidrBlockRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisassociateTransitGatewayRouteTableResult disassociateTransitGatewayRouteTable( + DisassociateTransitGatewayRouteTableRequest disassociateTransitGatewayRouteTableRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpcAttributeResult describeVpcAttribute(DescribeVpcAttributeRequest describeVpcAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw 
new UnsupportedOperationException("Not supported in mock"); } @Override public RevokeSecurityGroupEgressResult revokeSecurityGroupEgress(RevokeSecurityGroupEgressRequest revokeSecurityGroupEgressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteNetworkAclEntryResult deleteNetworkAclEntry(DeleteNetworkAclEntryRequest deleteNetworkAclEntryRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateVolumeResult createVolume(CreateVolumeRequest createVolumeRequest) throws AmazonServiceException, AmazonClientException { + public CreateVolumeResult createVolume(CreateVolumeRequest createVolumeRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ModifyVolumeResult modifyVolume(ModifyVolumeRequest modifyVolumeRequest) throws AmazonServiceException, AmazonClientException { + public ModifyVolumeResult modifyVolume(ModifyVolumeRequest modifyVolumeRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeInstanceStatusResult describeInstanceStatus(DescribeInstanceStatusRequest describeInstanceStatusRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpnGatewaysResult describeVpnGateways(DescribeVpnGatewaysRequest describeVpnGatewaysRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateSubnetResult createSubnet(CreateSubnetRequest createSubnetRequest) throws AmazonServiceException, AmazonClientException { + public CreateSubnetResult createSubnet(CreateSubnetRequest createSubnetRequest) throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings( DescribeReservedInstancesOfferingsRequest describeReservedInstancesOfferingsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssignPrivateIpAddressesResult assignPrivateIpAddresses(AssignPrivateIpAddressesRequest assignPrivateIpAddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AssignIpv6AddressesResult assignIpv6Addresses(AssignIpv6AddressesRequest assignIpv6AddressesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSpotFleetRequestHistoryResult describeSpotFleetRequestHistory( DescribeSpotFleetRequestHistoryRequest describeSpotFleetRequestHistoryRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeleteSnapshotResult deleteSnapshot(DeleteSnapshotRequest deleteSnapshotRequest) - throws 
AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ReplaceNetworkAclAssociationResult replaceNetworkAclAssociation( ReplaceNetworkAclAssociationRequest replaceNetworkAclAssociationRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DisassociateAddressResult disassociateAddress(DisassociateAddressRequest disassociateAddressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DisassociateClientVpnTargetNetworkResult disassociateClientVpnTargetNetwork( + DisassociateClientVpnTargetNetworkRequest disassociateClientVpnTargetNetworkRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreatePlacementGroupResult createPlacementGroup(CreatePlacementGroupRequest createPlacementGroupRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public BundleInstanceResult bundleInstance(BundleInstanceRequest bundleInstanceRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DeletePlacementGroupResult deletePlacementGroup(DeletePlacementGroupRequest deletePlacementGroupRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ModifySubnetAttributeResult modifySubnetAttribute(ModifySubnetAttributeRequest modifySubnetAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteVpcResult deleteVpc(DeleteVpcRequest deleteVpcRequest) throws AmazonServiceException, AmazonClientException { + public ModifyTransitGatewayVpcAttachmentResult modifyTransitGatewayVpcAttachment( + ModifyTransitGatewayVpcAttachmentRequest modifyTransitGatewayVpcAttachmentRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CopySnapshotResult copySnapshot(CopySnapshotRequest copySnapshotRequest) throws AmazonServiceException, AmazonClientException { + public DeleteVpcResult deleteVpc(DeleteVpcRequest deleteVpcRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteVpcEndpointConnectionNotificationsResult deleteVpcEndpointConnectionNotifications( + DeleteVpcEndpointConnectionNotificationsRequest deleteVpcEndpointConnectionNotificationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DeleteVpcEndpointServiceConfigurationsResult deleteVpcEndpointServiceConfigurations( + DeleteVpcEndpointServiceConfigurationsRequest deleteVpcEndpointServiceConfigurationsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CopySnapshotResult copySnapshot(CopySnapshotRequest copySnapshotRequest) throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } 
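This sweep of the throws clauses is safe for two reasons: in the AWS SDK, AmazonServiceException is a subclass of AmazonClientException, so declaring both was always redundant, and Java allows an overriding method to declare fewer (or narrower) checked exceptions than the method it overrides. A minimal, self-contained sketch of that rule — ClientException and ServiceException are stand-ins for the SDK classes, not the real ones:

// ClientException / ServiceException mirror the AWS SDK hierarchy in which
// AmazonServiceException extends AmazonClientException.
class ClientException extends Exception {}
class ServiceException extends ClientException {}

interface Ec2Like {
    // The interface may declare both exceptions...
    String describeAddresses() throws ServiceException, ClientException;
}

class Ec2LikeMock implements Ec2Like {
    // ...but an implementation may legally narrow the clause to the supertype
    // alone, which is the cleanup applied throughout AmazonEC2Mock above.
    @Override
    public String describeAddresses() throws ClientException {
        throw new UnsupportedOperationException("Not supported in mock");
    }
}

Nothing changes for callers: a ServiceException still propagates, merely typed statically as its ClientException supertype.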
+ + @Override + public CreateCapacityReservationResult createCapacityReservation(CreateCapacityReservationRequest createCapacityReservationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateClientVpnEndpointResult createClientVpnEndpoint(CreateClientVpnEndpointRequest createClientVpnEndpointRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateClientVpnRouteResult createClientVpnRoute(CreateClientVpnRouteRequest createClientVpnRouteRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpcEndpointServicesResult describeVpcEndpointServices( DescribeVpcEndpointServicesRequest describeVpcEndpointServicesRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public AllocateAddressResult allocateAddress(AllocateAddressRequest allocateAddressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ReleaseAddressResult releaseAddress(ReleaseAddressRequest releaseAddressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1938,13 +2454,24 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public ResetInstanceAttributeResult resetInstanceAttribute(ResetInstanceAttributeRequest resetInstanceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateKeyPairResult createKeyPair(CreateKeyPairRequest createKeyPairRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateLaunchTemplateResult createLaunchTemplate(CreateLaunchTemplateRequest createLaunchTemplateRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateLaunchTemplateVersionResult createLaunchTemplateVersion( + CreateLaunchTemplateVersionRequest createLaunchTemplateVersionRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1955,38 +2482,50 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public ReplaceNetworkAclEntryResult replaceNetworkAclEntry(ReplaceNetworkAclEntryRequest replaceNetworkAclEntryRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeSnapshotsResult describeSnapshots(DescribeSnapshotsRequest describeSnapshotsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public CreateNetworkAclResult createNetworkAcl(CreateNetworkAclRequest createNetworkAclRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RegisterImageResult registerImage(RegisterImageRequest registerImageRequest) - throws AmazonServiceException, AmazonClientException { + 
throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public RejectTransitGatewayVpcAttachmentResult rejectTransitGatewayVpcAttachment( + RejectTransitGatewayVpcAttachmentRequest rejectTransitGatewayVpcAttachmentRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public RejectVpcEndpointConnectionsResult rejectVpcEndpointConnections( + RejectVpcEndpointConnectionsRequest rejectVpcEndpointConnectionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public ResetNetworkInterfaceAttributeResult resetNetworkInterfaceAttribute( ResetNetworkInterfaceAttributeRequest resetNetworkInterfaceAttributeRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public EnableVpcClassicLinkResult enableVpcClassicLink(EnableVpcClassicLinkRequest enableVpcClassicLinkRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -1996,122 +2535,160 @@ public class AmazonEC2Mock implements AmazonEC2 { throw new UnsupportedOperationException("Not supported in mock"); } + @Override + public ExportClientVpnClientCertificateRevocationListResult exportClientVpnClientCertificateRevocationList( + ExportClientVpnClientCertificateRevocationListRequest exportClientVpnClientCertificateRevocationListRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ExportClientVpnClientConfigurationResult exportClientVpnClientConfiguration( + ExportClientVpnClientConfigurationRequest exportClientVpnClientConfigurationRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ExportTransitGatewayRoutesResult exportTransitGatewayRoutes( + ExportTransitGatewayRoutesRequest exportTransitGatewayRoutesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + @Override public CreateVpnConnectionRouteResult createVpnConnectionRoute(CreateVpnConnectionRouteRequest createVpnConnectionRouteRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeVpcEndpointsResult describeVpcEndpoints(DescribeVpcEndpointsRequest describeVpcEndpointsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DetachClassicLinkVpcResult detachClassicLinkVpc(DetachClassicLinkVpcRequest detachClassicLinkVpcRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeReservedInstancesResult describeReservedInstances() throws AmazonServiceException, AmazonClientException { + public DescribeReservedInstancesResult describeReservedInstances() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeAvailabilityZonesResult describeAvailabilityZones() throws AmazonServiceException, AmazonClientException { + public DescribeAvailabilityZonesResult describeAvailabilityZones() throws AmazonClientException { throw new 
UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSpotPriceHistoryResult describeSpotPriceHistory() throws AmazonServiceException, AmazonClientException { + public DescribeSpotPriceHistoryResult describeSpotPriceHistory() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeNetworkInterfacesResult describeNetworkInterfaces() throws AmazonServiceException, AmazonClientException { + public DescribeNetworkInterfacesResult describeNetworkInterfaces() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeRegionsResult describeRegions() throws AmazonServiceException, AmazonClientException { + public DescribeRegionsResult describeRegions() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeInternetGatewaysResult describeInternetGateways() throws AmazonServiceException, AmazonClientException { + public DescribeInternetGatewaysResult describeInternetGateways() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSecurityGroupsResult describeSecurityGroups() throws AmazonServiceException, AmazonClientException { + public DescribeSecurityGroupsResult describeSecurityGroups() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSpotDatafeedSubscriptionResult describeSpotDatafeedSubscription() throws AmazonServiceException, AmazonClientException { + public DescribeSpotDatafeedSubscriptionResult describeSpotDatafeedSubscription() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeAccountAttributesResult describeAccountAttributes() throws AmazonServiceException, AmazonClientException { + public DescribeAccountAttributesResult describeAccountAttributes() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVolumeStatusResult describeVolumeStatus() throws AmazonServiceException, AmazonClientException { + public DescribeVolumeStatusResult describeVolumeStatus() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeImportSnapshotTasksResult describeImportSnapshotTasks() throws AmazonServiceException, AmazonClientException { + public DescribeImportSnapshotTasksResult describeImportSnapshotTasks() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpnConnectionsResult describeVpnConnections() throws AmazonServiceException, AmazonClientException { + public DescribeVpnConnectionsResult describeVpnConnections() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpcsResult describeVpcs() throws AmazonServiceException, AmazonClientException { + public DescribeVpcsResult describeVpcs() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public AcceptVpcPeeringConnectionResult acceptVpcPeeringConnection() throws AmazonServiceException, AmazonClientException { + public AcceptVpcPeeringConnectionResult acceptVpcPeeringConnection() throws AmazonClientException { throw new UnsupportedOperationException("Not 
supported in mock"); } @Override - public DescribeExportTasksResult describeExportTasks() throws AmazonServiceException, AmazonClientException { + public AdvertiseByoipCidrResult advertiseByoipCidr(AdvertiseByoipCidrRequest advertiseByoipCidrRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateVpcPeeringConnectionResult createVpcPeeringConnection() throws AmazonServiceException, AmazonClientException { + public DescribeExportTasksResult describeExportTasks() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CancelImportTaskResult cancelImportTask() throws AmazonServiceException, AmazonClientException { + public DescribeFleetHistoryResult describeFleetHistory(DescribeFleetHistoryRequest describeFleetHistoryRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVolumesResult describeVolumes() throws AmazonServiceException, AmazonClientException { + public DescribeFleetInstancesResult describeFleetInstances(DescribeFleetInstancesRequest describeFleetInstancesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeFleetsResult describeFleets(DescribeFleetsRequest describeFleetsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CreateVpcPeeringConnectionResult createVpcPeeringConnection() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public CancelImportTaskResult cancelImportTask() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeVolumesResult describeVolumes() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesListingsResult describeReservedInstancesListings() - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeRouteTablesResult describeRouteTables() throws AmazonServiceException, AmazonClientException { + public DescribeRouteTablesResult describeRouteTables() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2128,48 +2705,69 @@ public class AmazonEC2Mock implements AmazonEC2 { } @Override - public DescribeDhcpOptionsResult describeDhcpOptions() throws AmazonServiceException, AmazonClientException { + public DescribeDhcpOptionsResult describeDhcpOptions() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribePrefixListsResult describePrefixLists() throws AmazonServiceException, AmazonClientException { + public DescribePrefixListsResult describePrefixLists() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeImportImageTasksResult describeImportImageTasks() throws AmazonServiceException, AmazonClientException { + public DescribePrincipalIdFormatResult describePrincipalIdFormat(DescribePrincipalIdFormatRequest describePrincipalIdFormatRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeNetworkAclsResult describeNetworkAcls() throws AmazonServiceException, AmazonClientException { + public 
DescribePublicIpv4PoolsResult describePublicIpv4Pools(DescribePublicIpv4PoolsRequest describePublicIpv4PoolsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeBundleTasksResult describeBundleTasks() throws AmazonServiceException, AmazonClientException { + public DescribeImportImageTasksResult describeImportImageTasks() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeNetworkAclsResult describeNetworkAcls() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeBundleTasksResult describeBundleTasks() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeByoipCidrsResult describeByoipCidrs(DescribeByoipCidrsRequest describeByoipCidrsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeCapacityReservationsResult describeCapacityReservations( + DescribeCapacityReservationsRequest describeCapacityReservationsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress(RevokeSecurityGroupIngressRequest revokeSecurityGroupIngressRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress() throws AmazonServiceException, AmazonClientException { + public RevokeSecurityGroupIngressResult revokeSecurityGroupIngress() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public CreateInternetGatewayResult createInternetGateway() throws AmazonServiceException, AmazonClientException { + public CreateInternetGatewayResult createInternetGateway() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeMovingAddressesResult describeMovingAddresses() throws AmazonServiceException, AmazonClientException { + public DescribeMovingAddressesResult describeMovingAddresses() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2179,17 +2777,23 @@ public class AmazonEC2Mock implements AmazonEC2 { } @Override - public DescribeConversionTasksResult describeConversionTasks() throws AmazonServiceException, AmazonClientException { + public DescribeConversionTasksResult describeConversionTasks() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ImportImageResult importImage() throws AmazonServiceException, AmazonClientException { + public ImportImageResult importImage() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeFlowLogsResult describeFlowLogs() throws AmazonServiceException, AmazonClientException { + public DescribeFlowLogsResult describeFlowLogs() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeFpgaImageAttributeResult describeFpgaImageAttribute( + DescribeFpgaImageAttributeRequest describeFpgaImageAttributeRequest) { throw new UnsupportedOperationException("Not supported in mock"); } 
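Because AmazonEC2Mock implements the full AmazonEC2 interface, every SDK upgrade that adds operations (transit gateways, client VPN, fleets, byoip CIDRs, and so on) forces a matching batch of fail-loud stubs like the ones above. A hypothetical guard test — not part of this change — could reflectively confirm that every method, including the no-argument convenience overloads, throws rather than silently returning null; MiniEc2 stands in for the real interface so the sketch stays self-contained:

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

interface MiniEc2 { // stand-in for com.amazonaws.services.ec2.AmazonEC2
    String describeVolumes();
    String describeVolumes(String describeVolumesRequest);
}

class MiniEc2Mock implements MiniEc2 {
    @Override
    public String describeVolumes() {
        throw new UnsupportedOperationException("Not supported in mock");
    }

    @Override
    public String describeVolumes(String describeVolumesRequest) {
        throw new UnsupportedOperationException("Not supported in mock");
    }
}

public class MockCoverageCheck {
    public static void main(String[] args) throws Exception {
        MiniEc2 mock = new MiniEc2Mock();
        for (Method method : MiniEc2.class.getMethods()) {
            try {
                // Every parameter is a reference type, so an array of nulls is a valid argument list.
                method.invoke(mock, new Object[method.getParameterCount()]);
                throw new AssertionError(method + " returned instead of failing loudly");
            } catch (InvocationTargetException e) {
                if ((e.getCause() instanceof UnsupportedOperationException) == false) {
                    throw new AssertionError(method + " failed with " + e.getCause());
                }
            }
        }
        System.out.println("every stub fails loudly");
    }
}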
@@ -2206,7 +2810,7 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public DescribeIamInstanceProfileAssociationsResult describeIamInstanceProfileAssociations( DescribeIamInstanceProfileAssociationsRequest describeIamInstanceProfileAssociationsRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2221,42 +2825,42 @@ public class AmazonEC2Mock implements AmazonEC2 { } @Override - public DescribeVpcPeeringConnectionsResult describeVpcPeeringConnections() throws AmazonServiceException, AmazonClientException { + public DescribeVpcPeeringConnectionsResult describeVpcPeeringConnections() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribePlacementGroupsResult describePlacementGroups() throws AmazonServiceException, AmazonClientException { + public DescribePlacementGroupsResult describePlacementGroups() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSubnetsResult describeSubnets() throws AmazonServiceException, AmazonClientException { + public DescribeSubnetsResult describeSubnets() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeInstancesResult describeInstances() throws AmazonServiceException, AmazonClientException { + public DescribeInstancesResult describeInstances() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeImagesResult describeImages() throws AmazonServiceException, AmazonClientException { + public DescribeImagesResult describeImages() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSpotInstanceRequestsResult describeSpotInstanceRequests() throws AmazonServiceException, AmazonClientException { + public DescribeSpotInstanceRequestsResult describeSpotInstanceRequests() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeCustomerGatewaysResult describeCustomerGateways() throws AmazonServiceException, AmazonClientException { + public DescribeCustomerGatewaysResult describeCustomerGateways() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpcClassicLinkResult describeVpcClassicLink() throws AmazonServiceException, AmazonClientException { + public DescribeVpcClassicLinkResult describeVpcClassicLink() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2267,69 +2871,162 @@ public class AmazonEC2Mock implements AmazonEC2 { } @Override - public DescribeClassicLinkInstancesResult describeClassicLinkInstances() throws AmazonServiceException, AmazonClientException { + public DescribeVpcEndpointConnectionNotificationsResult describeVpcEndpointConnectionNotifications( + DescribeVpcEndpointConnectionNotificationsRequest describeVpcEndpointConnectionNotificationsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeTagsResult describeTags() throws AmazonServiceException, AmazonClientException { + public DescribeVpcEndpointConnectionsResult describeVpcEndpointConnections( + DescribeVpcEndpointConnectionsRequest 
describeVpcEndpointConnectionsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public ImportSnapshotResult importSnapshot() throws AmazonServiceException, AmazonClientException { + public DescribeVpcEndpointServiceConfigurationsResult describeVpcEndpointServiceConfigurations( + DescribeVpcEndpointServiceConfigurationsRequest describeVpcEndpointServiceConfigurationsRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeSpotFleetRequestsResult describeSpotFleetRequests() throws AmazonServiceException, AmazonClientException { + public DescribeVpcEndpointServicePermissionsResult describeVpcEndpointServicePermissions( + DescribeVpcEndpointServicePermissionsRequest describeVpcEndpointServicePermissionsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeClassicLinkInstancesResult describeClassicLinkInstances() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeClientVpnAuthorizationRulesResult describeClientVpnAuthorizationRules( + DescribeClientVpnAuthorizationRulesRequest describeClientVpnAuthorizationRulesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeClientVpnConnectionsResult describeClientVpnConnections( + DescribeClientVpnConnectionsRequest describeClientVpnConnectionsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeClientVpnEndpointsResult describeClientVpnEndpoints( + DescribeClientVpnEndpointsRequest describeClientVpnEndpointsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeClientVpnRoutesResult describeClientVpnRoutes( + DescribeClientVpnRoutesRequest describeClientVpnRoutesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeClientVpnTargetNetworksResult describeClientVpnTargetNetworks( + DescribeClientVpnTargetNetworksRequest describeClientVpnTargetNetworksRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeTagsResult describeTags() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeTransitGatewayAttachmentsResult describeTransitGatewayAttachments( + DescribeTransitGatewayAttachmentsRequest describeTransitGatewayAttachmentsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeTransitGatewayRouteTablesResult describeTransitGatewayRouteTables( + DescribeTransitGatewayRouteTablesRequest describeTransitGatewayRouteTablesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeTransitGatewayVpcAttachmentsResult describeTransitGatewayVpcAttachments( + DescribeTransitGatewayVpcAttachmentsRequest describeTransitGatewayVpcAttachmentsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeTransitGatewaysResult describeTransitGateways(DescribeTransitGatewaysRequest describeTransitGatewaysRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public ImportSnapshotResult importSnapshot() throws AmazonClientException { + throw new 
UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeSpotFleetRequestsResult describeSpotFleetRequests() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesModificationsResult describeReservedInstancesModifications() - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription() throws AmazonServiceException, AmazonClientException { + public DeleteSpotDatafeedSubscriptionResult deleteSpotDatafeedSubscription() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeAddressesResult describeAddresses() throws AmazonServiceException, AmazonClientException { + public DescribeAddressesResult describeAddresses() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeKeyPairsResult describeKeyPairs() throws AmazonServiceException, AmazonClientException { + public DescribeAggregateIdFormatResult describeAggregateIdFormat(DescribeAggregateIdFormatRequest describeAggregateIdFormatRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeInstanceStatusResult describeInstanceStatus() throws AmazonServiceException, AmazonClientException { + public DescribeKeyPairsResult describeKeyPairs() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpnGatewaysResult describeVpnGateways() throws AmazonServiceException, AmazonClientException { + public DescribeLaunchTemplateVersionsResult describeLaunchTemplateVersions( + DescribeLaunchTemplateVersionsRequest describeLaunchTemplateVersionsRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeLaunchTemplatesResult describeLaunchTemplates(DescribeLaunchTemplatesRequest describeLaunchTemplatesRequest) { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeInstanceStatusResult describeInstanceStatus() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeVpnGatewaysResult describeVpnGateways() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DescribeReservedInstancesOfferingsResult describeReservedInstancesOfferings() - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpcEndpointServicesResult describeVpcEndpointServices() throws AmazonServiceException, AmazonClientException { + public DescribeVpcEndpointServicesResult describeVpcEndpointServices() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public AllocateAddressResult allocateAddress() throws AmazonServiceException, AmazonClientException { + public AllocateAddressResult allocateAddress() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2339,18 +3036,24 @@ public class AmazonEC2Mock implements AmazonEC2 { } @Override - public DescribeSnapshotsResult 
describeSnapshots() throws AmazonServiceException, AmazonClientException { + public ApplySecurityGroupsToClientVpnTargetNetworkResult applySecurityGroupsToClientVpnTargetNetwork( + ApplySecurityGroupsToClientVpnTargetNetworkRequest applySecurityGroupsToClientVpnTargetNetworkRequest) { throw new UnsupportedOperationException("Not supported in mock"); } @Override - public DescribeVpcEndpointsResult describeVpcEndpoints() throws AmazonServiceException, AmazonClientException { + public DescribeSnapshotsResult describeSnapshots() throws AmazonClientException { + throw new UnsupportedOperationException("Not supported in mock"); + } + + @Override + public DescribeVpcEndpointsResult describeVpcEndpoints() throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @Override public DryRunResult dryRun(DryRunSupportedRequest request) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } @@ -2370,7 +3073,7 @@ public class AmazonEC2Mock implements AmazonEC2 { @Override public ModifySpotFleetRequestResult modifySpotFleetRequest(ModifySpotFleetRequestRequest modifySpotFleetRequestRequest) - throws AmazonServiceException, AmazonClientException { + throws AmazonClientException { throw new UnsupportedOperationException("Not supported in mock"); } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index 9d7d7e0eb06..6703812a4ec 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -77,7 +77,7 @@ public class Ec2DiscoveryTests extends ESTestCase { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) { @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + public TransportAddress[] addressesFromString(String address) throws UnknownHostException { // we just need to ensure we don't resolve DNS here return new TransportAddress[] {poorMansDNS.getOrDefault(address, buildNewFakeTransportAddress())}; } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceSeedHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceSeedHostsProvider.java index fded7c2445d..d193cb25c6e 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceSeedHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceSeedHostsProvider.java @@ -233,8 +233,7 @@ public class GceSeedHostsProvider implements SeedHostsProvider { // ip_private is a single IP Address. 
We need to build a TransportAddress from it // If user has set `es_port` metadata, we don't need to ping all ports - // we only limit to 1 addresses, makes no sense to ping 100 ports - TransportAddress[] addresses = transportService.addressesFromString(address, 1); + TransportAddress[] addresses = transportService.addressesFromString(address); for (TransportAddress transportAddress : addresses) { logger.trace("adding {}, type {}, address {}, transport_address {}, status {}", name, type, diff --git a/plugins/examples/custom-settings/build.gradle b/plugins/examples/custom-settings/build.gradle index 3caf29c8513..b750018fefe 100644 --- a/plugins/examples/custom-settings/build.gradle +++ b/plugins/examples/custom-settings/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -26,7 +27,7 @@ esplugin { noticeFile rootProject.file('NOTICE.txt') } -integTestCluster { +testClusters.integTest { // Adds a setting in the Elasticsearch keystore before running the integration tests - keystoreSetting 'custom.secured', 'password' -} \ No newline at end of file + keystore 'custom.secured', 'password' +} diff --git a/plugins/examples/custom-suggester/build.gradle b/plugins/examples/custom-suggester/build.gradle index 977e467391d..a6861c8be63 100644 --- a/plugins/examples/custom-suggester/build.gradle +++ b/plugins/examples/custom-suggester/build.gradle @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ - +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -27,8 +27,8 @@ esplugin { noticeFile rootProject.file('NOTICE.txt') } -integTestCluster { - numNodes = 2 +testClusters.integTest { + numberOfNodes = 2 } // this plugin has no unit tests, only rest tests diff --git a/plugins/examples/painless-whitelist/build.gradle b/plugins/examples/painless-whitelist/build.gradle index 95928c472ca..738a3be86af 100644 --- a/plugins/examples/painless-whitelist/build.gradle +++ b/plugins/examples/painless-whitelist/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -31,8 +32,8 @@ dependencies { compileOnly "org.elasticsearch.plugin:elasticsearch-scripting-painless-spi:${versions.elasticsearch}" } -if (System.getProperty('tests.distribution') == null) { - integTestCluster.distribution = 'oss' +testClusters.integTest { + distribution = 'oss' } test.enabled = false diff --git a/plugins/examples/rescore/build.gradle b/plugins/examples/rescore/build.gradle index cdecd760c81..e18805bc547 100644 --- a/plugins/examples/rescore/build.gradle +++ b/plugins/examples/rescore/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index 98dd093ac17..14a6189f9ad 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -36,11 +37,11 @@ task exampleFixture(type: org.elasticsearch.gradle.test.AntFixture) { args 'org.elasticsearch.example.resthandler.ExampleFixture', baseDir, 'TEST' } -integTestCluster { +integTest { dependsOn exampleFixture -} -integTestRunner { - nonInputProperties.systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" + runner { + nonInputProperties.systemProperty 'external.address', "${ -> exampleFixture.addressAndPort }" + } } testingConventions.naming { diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle index e9da62acdcf..6f88baccefc 100644 --- a/plugins/examples/script-expert-scoring/build.gradle +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { diff --git a/plugins/examples/security-authorization-engine/build.gradle b/plugins/examples/security-authorization-engine/build.gradle index f869e4872dd..fba9580525b 100644 --- a/plugins/examples/security-authorization-engine/build.gradle +++ b/plugins/examples/security-authorization-engine/build.gradle @@ -1,3 +1,4 @@ +apply plugin: 'elasticsearch.testclusters' apply plugin: 'elasticsearch.esplugin' esplugin { @@ -14,15 +15,14 @@ dependencies { testCompile "org.elasticsearch.client:x-pack-transport:${versions.elasticsearch}" } - -integTestRunner { +integTest { + dependsOn buildZip + runner { systemProperty 'tests.security.manager', 'false' + } } -integTestCluster { - dependsOn buildZip - distribution = 'default' - +testClusters.integTest { setting 'xpack.security.enabled', 'true' setting 'xpack.ilm.enabled', 'false' setting 'xpack.ml.enabled', 'false' @@ -34,17 +34,7 @@ integTestCluster { // processors are being used that are in ingest-common module. 
distribution = 'default' - setupCommand 'setupDummyUser', - 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'custom_superuser' - waitCondition = { node, ant -> - File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", - dest: tmpFile.toString(), - username: 'test_user', - password: 'x-pack-test-password', - ignoreerrors: true, - retries: 10) - return tmpFile.exists() - } + user role: 'custom_superuser' } + check.dependsOn integTest diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java index 17a5c1fb97e..7a4fbfe42ae 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -30,31 +30,45 @@ import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpPipelinedRequest; +import org.elasticsearch.http.HttpReadTimeoutException; import org.elasticsearch.http.nio.cors.NioCorsConfig; import org.elasticsearch.http.nio.cors.NioCorsHandler; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.ReadWriteHandler; import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.TaskScheduler; import org.elasticsearch.nio.WriteOperation; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; +import java.util.function.LongSupplier; public class HttpReadWriteHandler implements ReadWriteHandler { private final NettyAdaptor adaptor; private final NioHttpChannel nioHttpChannel; private final NioHttpServerTransport transport; + private final TaskScheduler taskScheduler; + private final LongSupplier nanoClock; + private final long readTimeoutNanos; + private boolean channelRegistered = false; + private boolean requestSinceReadTimeoutTrigger = false; + private int inFlightRequests = 0; public HttpReadWriteHandler(NioHttpChannel nioHttpChannel, NioHttpServerTransport transport, HttpHandlingSettings settings, - NioCorsConfig corsConfig) { + NioCorsConfig corsConfig, TaskScheduler taskScheduler, LongSupplier nanoClock) { this.nioHttpChannel = nioHttpChannel; this.transport = transport; + this.taskScheduler = taskScheduler; + this.nanoClock = nanoClock; + this.readTimeoutNanos = TimeUnit.MILLISECONDS.toNanos(settings.getReadTimeoutMillis()); List handlers = new ArrayList<>(5); HttpRequestDecoder decoder = new HttpRequestDecoder(settings.getMaxInitialLineLength(), settings.getMaxHeaderSize(), @@ -77,10 +91,21 @@ public class HttpReadWriteHandler implements ReadWriteHandler { } @Override - public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException { + public void channelRegistered() { + channelRegistered = true; + if (readTimeoutNanos > 0) { + scheduleReadTimeout(); + } + } + + @Override + public int consumeReads(InboundChannelBuffer channelBuffer) { + assert channelRegistered : "channelRegistered should have been 
called"; int bytesConsumed = adaptor.read(channelBuffer.sliceAndRetainPagesTo(channelBuffer.getIndex())); Object message; while ((message = adaptor.pollInboundMessage()) != null) { + ++inFlightRequests; + requestSinceReadTimeoutTrigger = true; handleRequest(message); } @@ -96,6 +121,11 @@ public class HttpReadWriteHandler implements ReadWriteHandler { @Override public List writeToBytes(WriteOperation writeOperation) { + assert writeOperation.getObject() instanceof NioHttpResponse : "This channel only supports messages that are of type: " + + NioHttpResponse.class + ". Found type: " + writeOperation.getObject().getClass() + "."; + assert channelRegistered : "channelRegistered should have been called"; + --inFlightRequests; + assert inFlightRequests >= 0 : "Inflight requests should never drop below zero, found: " + inFlightRequests; adaptor.write(writeOperation); return pollFlushOperations(); } @@ -152,4 +182,17 @@ public class HttpReadWriteHandler implements ReadWriteHandler { request.release(); } } + + private void maybeReadTimeout() { + if (requestSinceReadTimeoutTrigger == false && inFlightRequests == 0) { + transport.onException(nioHttpChannel, new HttpReadTimeoutException(TimeValue.nsecToMSec(readTimeoutNanos))); + } else { + requestSinceReadTimeoutTrigger = false; + scheduleReadTimeout(); + } + } + + private void scheduleReadTimeout() { + taskScheduler.scheduleAtRelativeTime(this::maybeReadTimeout, nanoClock.getAsLong() + readTimeoutNanos); + } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java index 57936ff70c6..fa0f3e9572c 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -25,7 +25,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeValue; @@ -43,16 +42,15 @@ import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.Page; import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.nio.NioGroupFactory; +import org.elasticsearch.transport.nio.PageAllocator; import java.io.IOException; import java.net.InetSocketAddress; -import java.nio.ByteBuffer; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.Arrays; @@ -80,8 +78,8 @@ import static org.elasticsearch.http.nio.cors.NioCorsHandler.ANY_ORIGIN; public class NioHttpServerTransport extends AbstractHttpServerTransport { private static final Logger logger = LogManager.getLogger(NioHttpServerTransport.class); - protected final PageCacheRecycler pageCacheRecycler; protected final NioCorsConfig corsConfig; + protected final PageAllocator pageAllocator; private final NioGroupFactory nioGroupFactory; protected final boolean tcpNoDelay; @@ -97,7 
+95,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { PageCacheRecycler pageCacheRecycler, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, NioGroupFactory nioGroupFactory) { super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher); - this.pageCacheRecycler = pageCacheRecycler; + this.pageAllocator = new PageAllocator(pageCacheRecycler); this.nioGroupFactory = nioGroupFactory; ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); @@ -206,15 +204,11 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { @Override public NioHttpChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { NioHttpChannel httpChannel = new NioHttpChannel(channel); - java.util.function.Supplier pageSupplier = () -> { - Recycler.V bytes = pageCacheRecycler.bytePage(false); - return new Page(ByteBuffer.wrap(bytes.v()), bytes::close); - }; HttpReadWriteHandler httpReadWritePipeline = new HttpReadWriteHandler(httpChannel, NioHttpServerTransport.this, - handlingSettings, corsConfig); + handlingSettings, corsConfig, selector.getTaskScheduler(), threadPool::relativeTimeInMillis); Consumer exceptionHandler = (e) -> onException(httpChannel, e); SocketChannelContext context = new BytesChannelContext(httpChannel, selector, exceptionHandler, httpReadWritePipeline, - new InboundChannelBuffer(pageSupplier)); + new InboundChannelBuffer(pageAllocator)); httpChannel.setContext(context); return httpChannel; } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 17dc6c41baa..a39098a3d59 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -26,7 +26,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -36,20 +35,17 @@ import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; import org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.Page; import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpTransport; import java.io.IOException; import java.net.InetSocketAddress; -import java.nio.ByteBuffer; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.concurrent.ConcurrentMap; import java.util.function.Consumer; import java.util.function.Function; -import java.util.function.Supplier; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; @@ -57,6 +53,7 @@ public class NioTransport extends TcpTransport { private static final Logger logger = LogManager.getLogger(NioTransport.class); + protected final PageAllocator pageAllocator; private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); private final NioGroupFactory groupFactory; private volatile
NioGroup nioGroup; @@ -66,6 +63,7 @@ public class NioTransport extends TcpTransport { PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService, NioGroupFactory groupFactory) { super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + this.pageAllocator = new PageAllocator(pageCacheRecycler); this.groupFactory = groupFactory; } @@ -158,14 +156,10 @@ public class NioTransport extends TcpTransport { @Override public NioTcpChannel createChannel(NioSelector selector, SocketChannel channel) { NioTcpChannel nioChannel = new NioTcpChannel(isClient == false, profileName, channel); - Supplier pageSupplier = () -> { - Recycler.V bytes = pageCacheRecycler.bytePage(false); - return new Page(ByteBuffer.wrap(bytes.v()), bytes::close); - }; TcpReadWriteHandler readWriteHandler = new TcpReadWriteHandler(nioChannel, NioTransport.this); Consumer exceptionHandler = (e) -> onException(nioChannel, e); BytesChannelContext context = new BytesChannelContext(nioChannel, selector, exceptionHandler, readWriteHandler, - new InboundChannelBuffer(pageSupplier)); + new InboundChannelBuffer(pageAllocator)); nioChannel.setContext(context); return nioChannel; } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/PageAllocator.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/PageAllocator.java new file mode 100644 index 00000000000..bf9f3ffc891 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/PageAllocator.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.nio.Page; + +import java.nio.ByteBuffer; +import java.util.function.IntFunction; + +public class PageAllocator implements IntFunction { + + private static final int RECYCLE_LOWER_THRESHOLD = PageCacheRecycler.BYTE_PAGE_SIZE / 2; + + private final PageCacheRecycler recycler; + + public PageAllocator(PageCacheRecycler recycler) { + this.recycler = recycler; + } + + @Override + public Page apply(int length) { + if (length >= RECYCLE_LOWER_THRESHOLD && length <= PageCacheRecycler.BYTE_PAGE_SIZE){ + Recycler.V bytePage = recycler.bytePage(false); + return new Page(ByteBuffer.wrap(bytePage.v(), 0, length), bytePage::close); + } else { + return new Page(ByteBuffer.allocate(length), () -> {}); + } + } +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java index d7e61f21173..93a846ea90f 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java @@ -33,12 +33,13 @@ import io.netty.handler.codec.http.HttpResponseDecoder; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpVersion; - import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.HttpReadTimeoutException; import org.elasticsearch.http.HttpRequest; import org.elasticsearch.http.HttpResponse; import org.elasticsearch.http.HttpTransportSettings; @@ -48,6 +49,7 @@ import org.elasticsearch.http.nio.cors.NioCorsHandler; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.TaskScheduler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; @@ -56,6 +58,8 @@ import org.mockito.ArgumentCaptor; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Iterator; import java.util.List; import java.util.function.BiConsumer; @@ -63,19 +67,14 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CR import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; -import static 
org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -84,8 +83,9 @@ import static org.mockito.Mockito.verify; public class HttpReadWriteHandlerTests extends ESTestCase { private HttpReadWriteHandler handler; - private NioHttpChannel nioHttpChannel; + private NioHttpChannel channel; private NioHttpServerTransport transport; + private TaskScheduler taskScheduler; private final RequestEncoder requestEncoder = new RequestEncoder(); private final ResponseDecoder responseDecoder = new ResponseDecoder(); @@ -93,22 +93,14 @@ public class HttpReadWriteHandlerTests extends ESTestCase { @Before public void setMocks() { transport = mock(NioHttpServerTransport.class); - Settings settings = Settings.EMPTY; - ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.getDefault(settings); - ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.getDefault(settings); - ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.getDefault(settings); - HttpHandlingSettings httpHandlingSettings = new HttpHandlingSettings(1024, - Math.toIntExact(maxChunkSize.getBytes()), - Math.toIntExact(maxHeaderSize.getBytes()), - Math.toIntExact(maxInitialLineLength.getBytes()), - SETTING_HTTP_RESET_COOKIES.getDefault(settings), - SETTING_HTTP_COMPRESSION.getDefault(settings), - SETTING_HTTP_COMPRESSION_LEVEL.getDefault(settings), - SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings), - SETTING_PIPELINING_MAX_EVENTS.getDefault(settings), - SETTING_CORS_ENABLED.getDefault(settings)); - nioHttpChannel = mock(NioHttpChannel.class); - handler = new HttpReadWriteHandler(nioHttpChannel, transport, httpHandlingSettings, NioCorsConfigBuilder.forAnyOrigin().build()); + Settings settings = Settings.builder().put(SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), new ByteSizeValue(1024)).build(); + HttpHandlingSettings httpHandlingSettings = HttpHandlingSettings.fromSettings(settings); + channel = mock(NioHttpChannel.class); + taskScheduler = mock(TaskScheduler.class); + + NioCorsConfig corsConfig = NioCorsConfigBuilder.forAnyOrigin().build(); + handler = new HttpReadWriteHandler(channel, transport, httpHandlingSettings, corsConfig, taskScheduler, System::nanoTime); + handler.channelRegistered(); } public void testSuccessfulDecodeHttpRequest() throws IOException { @@ -188,7 +180,7 @@ public class HttpReadWriteHandlerTests extends ESTestCase { flushOperation.getListener().accept(null, null); // Since we have keep-alive set to false, we should close the channel after the response has been // flushed - verify(nioHttpChannel).close(); + verify(channel).close(); } finally { response.release(); } @@ -197,11 +189,7 @@ public class HttpReadWriteHandlerTests extends ESTestCase { @SuppressWarnings("unchecked") public void testEncodeHttpResponse() throws 
IOException { prepareHandlerForResponse(handler); - - DefaultFullHttpRequest nettyRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); - NioHttpRequest nioHttpRequest = new NioHttpRequest(nettyRequest, 0); - NioHttpResponse httpResponse = nioHttpRequest.createResponse(RestStatus.OK, BytesArray.EMPTY); - httpResponse.addHeader(HttpHeaderNames.CONTENT_LENGTH.toString(), "0"); + NioHttpResponse httpResponse = emptyGetResponse(0); SocketChannelContext context = mock(SocketChannelContext.class); HttpWriteOperation writeOperation = new HttpWriteOperation(context, httpResponse, mock(BiConsumer.class)); @@ -335,10 +323,65 @@ public class HttpReadWriteHandlerTests extends ESTestCase { } } - private FullHttpResponse executeCorsRequest(final Settings settings, final String originValue, final String host) throws IOException { + @SuppressWarnings("unchecked") + public void testReadTimeout() throws IOException { + TimeValue timeValue = TimeValue.timeValueMillis(500); + Settings settings = Settings.builder().put(SETTING_HTTP_READ_TIMEOUT.getKey(), timeValue).build(); HttpHandlingSettings httpHandlingSettings = HttpHandlingSettings.fromSettings(settings); - NioCorsConfig nioCorsConfig = NioHttpServerTransport.buildCorsConfig(settings); - HttpReadWriteHandler handler = new HttpReadWriteHandler(nioHttpChannel, transport, httpHandlingSettings, nioCorsConfig); + + NioCorsConfig corsConfig = NioCorsConfigBuilder.forAnyOrigin().build(); + TaskScheduler taskScheduler = new TaskScheduler(); + + Iterator timeValues = Arrays.asList(0, 2, 4, 6, 8).iterator(); + handler = new HttpReadWriteHandler(channel, transport, httpHandlingSettings, corsConfig, taskScheduler, timeValues::next); + handler.channelRegistered(); + + prepareHandlerForResponse(handler); + SocketChannelContext context = mock(SocketChannelContext.class); + HttpWriteOperation writeOperation0 = new HttpWriteOperation(context, emptyGetResponse(0), mock(BiConsumer.class)); + ((ChannelPromise) handler.writeToBytes(writeOperation0).get(0).getListener()).setSuccess(); + + taskScheduler.pollTask(timeValue.getNanos() + 1).run(); + // There was a read. Do not close. + verify(transport, times(0)).onException(eq(channel), any(HttpReadTimeoutException.class)); + + prepareHandlerForResponse(handler); + prepareHandlerForResponse(handler); + + taskScheduler.pollTask(timeValue.getNanos() + 3).run(); + // There was a read. Do not close. + verify(transport, times(0)).onException(eq(channel), any(HttpReadTimeoutException.class)); + + HttpWriteOperation writeOperation1 = new HttpWriteOperation(context, emptyGetResponse(1), mock(BiConsumer.class)); + ((ChannelPromise) handler.writeToBytes(writeOperation1).get(0).getListener()).setSuccess(); + + taskScheduler.pollTask(timeValue.getNanos() + 5).run(); + // There has not been a read, however there is still an inflight request. Do not close. 
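+        // Note: the relative-nanos supplier above yields 0, 2, 4, 6, 8 in order, so each
+        // pollTask(timeValue.getNanos() + n) call pops the timeout task whose deadline was, in effect,
+        // computed while the clock read n - 1; a timeout is only surfaced as an HttpReadTimeoutException
+        // when a full SETTING_HTTP_READ_TIMEOUT interval passes with no read and no in-flight request.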
+ verify(transport, times(0)).onException(eq(channel), any(HttpReadTimeoutException.class)); + + HttpWriteOperation writeOperation2 = new HttpWriteOperation(context, emptyGetResponse(2), mock(BiConsumer.class)); + ((ChannelPromise) handler.writeToBytes(writeOperation2).get(0).getListener()).setSuccess(); + + taskScheduler.pollTask(timeValue.getNanos() + 7).run(); + // No reads and no inflight requests, close + verify(transport, times(1)).onException(eq(channel), any(HttpReadTimeoutException.class)); + assertNull(taskScheduler.pollTask(timeValue.getNanos() + 9)); + } + + private static NioHttpResponse emptyGetResponse(int sequenceNumber) { + DefaultFullHttpRequest nettyRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + NioHttpRequest nioHttpRequest = new NioHttpRequest(nettyRequest, sequenceNumber); + NioHttpResponse httpResponse = nioHttpRequest.createResponse(RestStatus.OK, BytesArray.EMPTY); + httpResponse.addHeader(HttpHeaderNames.CONTENT_LENGTH.toString(), "0"); + return httpResponse; + } + + private FullHttpResponse executeCorsRequest(final Settings settings, final String originValue, final String host) throws IOException { + HttpHandlingSettings httpSettings = HttpHandlingSettings.fromSettings(settings); + NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + HttpReadWriteHandler handler = new HttpReadWriteHandler(channel, transport, httpSettings, corsConfig, taskScheduler, + System::nanoTime); + handler.channelRegistered(); prepareHandlerForResponse(handler); DefaultFullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); if (originValue != null) { @@ -360,7 +403,7 @@ public class HttpReadWriteHandlerTests extends ESTestCase { - private NioHttpRequest prepareHandlerForResponse(HttpReadWriteHandler handler) throws IOException { + private void prepareHandlerForResponse(HttpReadWriteHandler handler) throws IOException { HttpMethod method = randomBoolean() ? HttpMethod.GET : HttpMethod.HEAD; HttpVersion version = randomBoolean() ? 
HttpVersion.HTTP_1_0 : HttpVersion.HTTP_1_1; String uri = "http://localhost:9090/" + randomAlphaOfLength(8); @@ -385,7 +428,6 @@ public class HttpReadWriteHandlerTests extends ESTestCase { assertEquals(HttpRequest.HttpVersion.HTTP_1_0, nioHttpRequest.protocolVersion()); } assertEquals(nioHttpRequest.uri(), uri); - return nioHttpRequest; } private InboundChannelBuffer toChannelBuffer(ByteBuf buf) { diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java index e3259b10b97..634ea7b44af 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpClient.java @@ -116,6 +116,20 @@ class NioHttpClient implements Closeable { return responses.iterator().next(); } + public final NioSocketChannel connect(InetSocketAddress remoteAddress) { + ChannelFactory factory = new ClientChannelFactory(new CountDownLatch(0), new + ArrayList<>()); + try { + NioSocketChannel nioSocketChannel = nioGroup.openChannel(remoteAddress, factory); + PlainActionFuture connectFuture = PlainActionFuture.newFuture(); + nioSocketChannel.addConnectListener(ActionListener.toBiConsumer(connectFuture)); + connectFuture.actionGet(); + return nioSocketChannel; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + private void onException(Exception e) { logger.error("Exception from http client", e); } @@ -212,6 +226,9 @@ class NioHttpClient implements Closeable { adaptor.addCloseListener((v, e) -> channel.close()); } + @Override + public void channelRegistered() {} + @Override public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer listener) { assert message instanceof HttpRequest : "Expected type HttpRequest.class, found: " + message.getClass(); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java index 2ffd5a64147..0b470fda00a 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -49,6 +50,7 @@ import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.http.nio.cors.NioCorsConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; @@ -66,6 +68,8 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import 
java.util.regex.PatternSyntaxException; import java.util.stream.Collectors; @@ -309,52 +313,47 @@ public class NioHttpServerTransportTests extends ESTestCase { assertThat(causeReference.get(), instanceOf(TooLongFrameException.class)); } -// public void testReadTimeout() throws Exception { -// final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { -// -// @Override -// public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { -// throw new AssertionError("Should not have received a dispatched request"); -// } -// -// @Override -// public void dispatchBadRequest(final RestRequest request, -// final RestChannel channel, -// final ThreadContext threadContext, -// final Throwable cause) { -// throw new AssertionError("Should not have received a dispatched request"); -// } -// -// }; -// -// Settings settings = Settings.builder() -// .put(HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT.getKey(), new TimeValue(randomIntBetween(100, 300))) -// .build(); -// -// -// NioEventLoopGroup group = new NioEventLoopGroup(); -// try (NioHttpServerTransport transport = -// new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { -// transport.start(); -// final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); -// -// AtomicBoolean channelClosed = new AtomicBoolean(false); -// -// Bootstrap clientBootstrap = new Bootstrap().channel(NioSocketChannel.class).handler(new ChannelInitializer() { -// -// @Override -// protected void initChannel(SocketChannel ch) { -// ch.pipeline().addLast(new ChannelHandlerAdapter() {}); -// -// } -// }).group(group); -// ChannelFuture connect = clientBootstrap.connect(remoteAddress.address()); -// connect.channel().closeFuture().addListener(future -> channelClosed.set(true)); -// -// assertBusy(() -> assertTrue("Channel should be closed due to read timeout", channelClosed.get()), 5, TimeUnit.SECONDS); -// -// } finally { -// group.shutdownGracefully().await(); -// } -// } + public void testReadTimeout() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + throw new AssertionError("Should not have received a dispatched request"); + } + + @Override + public void dispatchBadRequest(final RestRequest request, + final RestChannel channel, + final ThreadContext threadContext, + final Throwable cause) { + throw new AssertionError("Should not have received a dispatched request"); + } + + }; + + Settings settings = Settings.builder() + .put(HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT.getKey(), new TimeValue(randomIntBetween(100, 300))) + .build(); + + + try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, pageRecycler, + threadPool, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger))) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (NioHttpClient client = new NioHttpClient()) { + NioSocketChannel channel = null; + try { + CountDownLatch channelClosedLatch = new CountDownLatch(1); + channel = client.connect(remoteAddress.address()); + channel.addCloseListener((r, t) -> channelClosedLatch.countDown()); + assertTrue("Channel should be closed due to read timeout", 
channelClosedLatch.await(1, TimeUnit.MINUTES)); + } finally { + if (channel != null) { + channel.close(); + } + } + } + } + } } diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java index 826bfd6585f..fac509a0e86 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportLoggingIT.java @@ -57,7 +57,7 @@ public class NioTransportLoggingIT extends NioIntegTestCase { ", action: cluster:monitor/nodes/hot_threads\\[n\\]\\]" + " WRITE: \\d+B"; final MockLogAppender.LoggingExpectation writeExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern); final String readPattern = @@ -69,7 +69,7 @@ public class NioTransportLoggingIT extends NioIntegTestCase { " READ: \\d+B"; final MockLogAppender.LoggingExpectation readExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "hot threads request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern); appender.addExpectation(writeExpectation); diff --git a/qa/build.gradle b/qa/build.gradle index cbcb1d45807..f1727f11515 100644 --- a/qa/build.gradle +++ b/qa/build.gradle @@ -12,9 +12,11 @@ subprojects { Project subproj -> */ repositories { maven { + name "elastic" url "https://artifacts.elastic.co/maven" } maven { + name "elastic-snapshots" url "https://snapshots.elastic.co/maven" } } diff --git a/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats b/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats index 466dac1b7b7..697e6456d1f 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/80_upgrade.bats @@ -60,6 +60,13 @@ setup() { install_package -v $(cat upgrade_from_version) } +@test "[UPGRADE] modify keystore" { + # deliberately modify the keystore to force it to be preserved during package upgrade + export_elasticsearch_paths + sudo -E "$ESHOME/bin/elasticsearch-keystore" remove keystore.seed + sudo -E echo keystore_seed | "$ESHOME/bin/elasticsearch-keystore" add -x keystore.seed +} + @test "[UPGRADE] start old version" { export JAVA_HOME=$SYSTEM_JAVA_HOME start_elasticsearch_service diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index f9e43bd45fc..dcbf5253bb0 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -37,7 +37,11 @@ int managementPort repositories { // the Wildfly distribution is not available via a repository, so we fake an Ivy repository on top of the download site ivy { - url "http://download.jboss.org" + name "wildfly" + url "https://download.jboss.org" + metadataSources { + artifact() + } patternLayout { artifact 'wildfly/[revision]/[module]-[revision].[ext]' } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index f6167beb199..7b1e9f425e3 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -128,6 +128,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_7_1 = new Version(V_6_7_1_ID, 
org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_7_2_ID = 6070299; public static final Version V_6_7_2 = new Version(V_6_7_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final int V_6_7_3_ID = 6070399; + public static final Version V_6_7_3 = new Version(V_6_7_3_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_8_0_ID = 6080099; public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; @@ -165,6 +167,8 @@ public class Version implements Comparable, ToXContentFragment { return V_7_0_0; case V_6_8_0_ID: return V_6_8_0; + case V_6_7_3_ID: + return V_6_7_3; case V_6_7_1_ID: return V_6_7_1; case V_6_7_2_ID: diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index cf605117bc0..c77d3f01e5f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -66,8 +66,10 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.stream.StreamSupport; import static org.elasticsearch.cluster.coordination.Coordinator.ZEN1_BWC_TERM; @@ -736,7 +738,7 @@ public class ClusterState implements ToXContentFragment, Diffable } public Builder putCustom(String type, Custom custom) { - customs.put(type, custom); + customs.put(type, Objects.requireNonNull(custom, type)); return this; } @@ -746,6 +748,7 @@ public class ClusterState implements ToXContentFragment, Diffable } public Builder customs(ImmutableOpenMap customs) { + StreamSupport.stream(customs.spliterator(), false).forEach(cursor -> Objects.requireNonNull(cursor.value, cursor.key)); this.customs.putAll(customs); return this; } diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java index b24ec795bc4..3713fffe606 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java @@ -25,17 +25,26 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.Collection; import java.util.HashSet; +import java.util.Map; import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; import static java.util.Collections.unmodifiableSet; public class ClusterBlockException extends ElasticsearchException { private final Set blocks; - public ClusterBlockException(Set blocks) { - super(buildMessage(blocks)); - this.blocks = blocks; + public ClusterBlockException(Set globalLevelBlocks) { + super(buildMessageForGlobalBlocks(globalLevelBlocks)); + this.blocks = globalLevelBlocks; + } + + public ClusterBlockException(Map> indexLevelBlocks) { + super(buildMessageForIndexBlocks(indexLevelBlocks)); + this.blocks = indexLevelBlocks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()); } public ClusterBlockException(StreamInput in) throws IOException { @@ -74,10 +83,26 @@ public class ClusterBlockException extends ElasticsearchException { return blocks; } - private static String buildMessage(Set blocks) { - StringBuilder sb = new 
StringBuilder("blocked by: "); - for (ClusterBlock block : blocks) { - sb.append("[").append(block.status()).append("/").append(block.id()).append("/").append(block.description()).append("];"); + private static String buildMessageForGlobalBlocks(Set globalLevelBlocks) { + assert globalLevelBlocks.isEmpty() == false; + Function blockDescription = block -> block.status() + "/" + block.id() + "/" + block.description(); + StringBuilder sb = new StringBuilder(); + if (globalLevelBlocks.isEmpty() == false) { + sb.append("blocked by: ["); + sb.append(globalLevelBlocks.stream().map(blockDescription).collect(Collectors.joining(", "))); + sb.append("];"); + } + return sb.toString(); + } + + private static String buildMessageForIndexBlocks(Map> indexLevelBlocks) { + assert indexLevelBlocks.isEmpty() == false; + Function blockDescription = block -> block.status() + "/" + block.id() + "/" + block.description(); + StringBuilder sb = new StringBuilder(); + for (Map.Entry> entry : indexLevelBlocks.entrySet()) { + sb.append("index [" + entry.getKey() + "] blocked by: ["); + sb.append(entry.getValue().stream().map(blockDescription).collect(Collectors.joining(", "))); + sb.append("];"); } return sb.toString(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index c46bc291e73..a7c41f64efa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.cluster.block; - import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; @@ -28,6 +27,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -36,14 +36,11 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; -import java.util.function.Function; import java.util.function.Predicate; -import java.util.stream.Stream; import static java.util.Collections.emptySet; import static java.util.Collections.unmodifiableSet; import static java.util.stream.Collectors.toSet; -import static java.util.stream.Stream.concat; /** * Represents current cluster level blocks to block dirty operations done against the cluster. 
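The two ClusterBlockException message builders above produce differently shaped strings. A minimal sketch of each form, assuming a hypothetical ClusterBlock named indexClosedBlock whose status(), id() and description() are FORBIDDEN, 4 and "index closed" (the block and index names are illustrative, not taken from this diff):

// hypothetical block: indexClosedBlock renders as FORBIDDEN/4/index closed
new ClusterBlockException(Collections.singleton(indexClosedBlock)).getMessage();
//   -> "blocked by: [FORBIDDEN/4/index closed];"
new ClusterBlockException(Collections.singletonMap("logs-1", Collections.singleton(indexClosedBlock))).getMessage();
//   -> "index [logs-1] blocked by: [FORBIDDEN/4/index closed];"

With several affected indices, one "index [...] blocked by: [...];" clause is appended per entry, and multiple blocks on the same index are comma-joined inside the brackets.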
@@ -83,8 +80,9 @@ public class ClusterBlocks extends AbstractDiffable { return indices(level).getOrDefault(index, emptySet()); } - private static EnumMap generateLevelHolders(Set global, - ImmutableOpenMap> indicesBlocks) { + private static EnumMap generateLevelHolders( + Set global, ImmutableOpenMap> indicesBlocks) { + EnumMap levelHolders = new EnumMap<>(ClusterBlockLevel.class); for (final ClusterBlockLevel level : ClusterBlockLevel.values()) { Predicate containsLevel = block -> block.contains(level); @@ -199,13 +197,7 @@ public class ClusterBlocks extends AbstractDiffable { } public ClusterBlockException indexBlockedException(ClusterBlockLevel level, String index) { - if (!indexBlocked(level, index)) { - return null; - } - Stream blocks = concat( - global(level).stream(), - blocksForIndex(level, index).stream()); - return new ClusterBlockException(unmodifiableSet(blocks.collect(toSet()))); + return indicesBlockedException(level, new String[]{index}); } public boolean indexBlocked(ClusterBlockLevel level, String index) { @@ -213,20 +205,21 @@ public class ClusterBlocks extends AbstractDiffable { } public ClusterBlockException indicesBlockedException(ClusterBlockLevel level, String[] indices) { - boolean indexIsBlocked = false; + Set globalLevelBlocks = global(level); + Map> indexLevelBlocks = new HashMap<>(); for (String index : indices) { - if (indexBlocked(level, index)) { - indexIsBlocked = true; + Set indexBlocks = blocksForIndex(level, index); + if (indexBlocks.isEmpty() == false || globalLevelBlocks.isEmpty() == false) { + indexLevelBlocks.put(index, Sets.union(indexBlocks, globalLevelBlocks)); } } - if (globalBlocked(level) == false && indexIsBlocked == false) { + if (indexLevelBlocks.isEmpty()) { + if(globalLevelBlocks.isEmpty() == false){ + return new ClusterBlockException(globalLevelBlocks); + } return null; } - Function> blocksForIndexAtLevel = index -> blocksForIndex(level, index).stream(); - Stream blocks = concat( - global(level).stream(), - Stream.of(indices).flatMap(blocksForIndexAtLevel)); - return new ClusterBlockException(unmodifiableSet(blocks.collect(toSet()))); + return new ClusterBlockException(indexLevelBlocks); } /** @@ -235,20 +228,27 @@ public class ClusterBlocks extends AbstractDiffable { * like the deletion of an index to free up resources on nodes. 
* @param indices the indices to check */ + public ClusterBlockException indicesAllowReleaseResources(String[] indices) { - final Function> blocksForIndexAtLevel = index -> - blocksForIndex(ClusterBlockLevel.METADATA_WRITE, index).stream(); - Stream blocks = concat( - global(ClusterBlockLevel.METADATA_WRITE).stream(), - Stream.of(indices).flatMap(blocksForIndexAtLevel)).filter(clusterBlock -> clusterBlock.isAllowReleaseResources() == false); - Set clusterBlocks = unmodifiableSet(blocks.collect(toSet())); - if (clusterBlocks.isEmpty()) { + Set globalBlocks = global(ClusterBlockLevel.METADATA_WRITE).stream() + .filter(clusterBlock -> clusterBlock.isAllowReleaseResources() == false).collect(toSet()); + Map> indexLevelBlocks = new HashMap<>(); + for (String index : indices) { + Set blocks = Sets.union(globalBlocks, blocksForIndex(ClusterBlockLevel.METADATA_WRITE, index)) + .stream().filter(clusterBlock -> clusterBlock.isAllowReleaseResources() == false).collect(toSet()); + if (blocks.isEmpty() == false) { + indexLevelBlocks.put(index, Sets.union(globalBlocks, blocks)); + } + } + if (indexLevelBlocks.isEmpty()) { + if(globalBlocks.isEmpty() == false){ + return new ClusterBlockException(globalBlocks); + } return null; } - return new ClusterBlockException(clusterBlocks); + return new ClusterBlockException(indexLevelBlocks); } - @Override public String toString() { if (global.isEmpty() && indices().isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index a7826c88307..50361d12eb5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -502,7 +502,6 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery }); } - private void processJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinCallback) { final Optional optionalJoin = joinRequest.getOptionalJoin(); synchronized (mutex) { @@ -1027,9 +1026,10 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId())) : getLocalNode() + " should be in published " + clusterState; - final PublishRequest publishRequest = coordinationState.get().handleClientValue(clusterState); final PublicationTransportHandler.PublicationContext publicationContext = publicationHandler.newPublicationContext(clusterChangedEvent); + + final PublishRequest publishRequest = coordinationState.get().handleClientValue(clusterState); final CoordinatorPublication publication = new CoordinatorPublication(publishRequest, publicationContext, new ListenableFuture<>(), ackListener, publishListener); currentPublication = Optional.of(publication); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 54c3001d903..436de2e2e7a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -74,11 +74,13 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import java.util.function.Function; import java.util.function.Predicate; +import 
java.util.stream.StreamSupport; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; @@ -1069,7 +1071,7 @@ public class MetaData implements Iterable, Diffable, To } public Builder putCustom(String type, Custom custom) { - customs.put(type, custom); + customs.put(type, Objects.requireNonNull(custom, type)); return this; } @@ -1079,6 +1081,7 @@ public class MetaData implements Iterable, Diffable, To } public Builder customs(ImmutableOpenMap customs) { + StreamSupport.stream(customs.spliterator(), false).forEach(cursor -> Objects.requireNonNull(cursor.value, cursor.key)); this.customs.putAll(customs); return this; } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index 580ea97de15..d55781c6487 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -84,8 +84,14 @@ public class FieldValueFactorFunction extends ScoreFunction { double val = value * boostFactor; double result = modifier.apply(val); if (result < 0f) { - throw new IllegalArgumentException("field value function must not produce negative scores, but got: [" + - result + "] for field value: [" + value + "]"); + String message = "field value function must not produce negative scores, but got: " + + "[" + result + "] for field value: [" + value + "]"; + if (modifier == Modifier.LN) { + message += "; consider using ln1p or ln2p instead of ln to avoid negative scores"; + } else if (modifier == Modifier.LOG) { + message += "; consider using log1p or log2p instead of log to avoid negative scores"; + } + throw new IllegalArgumentException(message); } return result; } diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkUtils.java b/server/src/main/java/org/elasticsearch/common/network/NetworkUtils.java index 7434fbc9d8b..4e8f871ed30 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkUtils.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkUtils.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.network; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; +import java.io.IOException; import java.net.Inet4Address; import java.net.Inet6Address; import java.net.InetAddress; @@ -33,6 +34,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Optional; +import java.util.function.Predicate; /** * Utilities for network interfaces / addresses binding and publishing. @@ -150,77 +152,53 @@ public abstract class NetworkUtils { return Constants.WINDOWS ? false : true; } - /** Returns all interface-local scope (loopback) addresses for interfaces that are up. 
*/ - static InetAddress[] getLoopbackAddresses() throws SocketException { - List list = new ArrayList<>(); - for (NetworkInterface intf : getInterfaces()) { - if (intf.isUp()) { - for (InetAddress address : Collections.list(intf.getInetAddresses())) { - if (address.isLoopbackAddress()) { - list.add(address); - } - } - } - } - if (list.isEmpty()) { - throw new IllegalArgumentException("No up-and-running loopback addresses found, got " + getInterfaces()); - } - return list.toArray(new InetAddress[list.size()]); - } - - /** Returns all site-local scope (private) addresses for interfaces that are up. */ - static InetAddress[] getSiteLocalAddresses() throws SocketException { - List list = new ArrayList<>(); - for (NetworkInterface intf : getInterfaces()) { - if (intf.isUp()) { - for (InetAddress address : Collections.list(intf.getInetAddresses())) { - if (address.isSiteLocalAddress()) { - list.add(address); - } - } - } - } - if (list.isEmpty()) { - throw new IllegalArgumentException("No up-and-running site-local (private) addresses found, got " + getInterfaces()); - } - return list.toArray(new InetAddress[list.size()]); - } - - /** Returns all global scope addresses for interfaces that are up. */ - static InetAddress[] getGlobalAddresses() throws SocketException { - List list = new ArrayList<>(); - for (NetworkInterface intf : getInterfaces()) { - if (intf.isUp()) { - for (InetAddress address : Collections.list(intf.getInetAddresses())) { - if (address.isLoopbackAddress() == false && - address.isSiteLocalAddress() == false && - address.isLinkLocalAddress() == false) { - list.add(address); - } - } - } - } - if (list.isEmpty()) { - throw new IllegalArgumentException("No up-and-running global-scope (public) addresses found, got " + getInterfaces()); - } - return list.toArray(new InetAddress[list.size()]); - } - - /** Returns all addresses (any scope) for interfaces that are up. - * This is only used to pick a publish address, when the user set network.host to a wildcard */ - static InetAddress[] getAllAddresses() throws SocketException { - List list = new ArrayList<>(); - for (NetworkInterface intf : getInterfaces()) { - if (intf.isUp()) { - for (InetAddress address : Collections.list(intf.getInetAddresses())) { + private static InetAddress[] filterAllAddresses(final Predicate predicate, final String message) throws IOException { + final List interfaces = getInterfaces(); + final List list = new ArrayList<>(); + for (final NetworkInterface intf : interfaces) { + for (final InetAddress address : Collections.list(intf.getInetAddresses())) { + if (predicate.test(address) && isUp(intf)) { list.add(address); } } } if (list.isEmpty()) { - throw new IllegalArgumentException("No up-and-running addresses found, got " + getInterfaces()); + throw new IllegalArgumentException(message + ", got " + interfaces); } - return list.toArray(new InetAddress[list.size()]); + return list.toArray(new InetAddress[0]); + } + + private static boolean isUp(final NetworkInterface intf) throws IOException { + try { + return intf.isUp(); + } catch (final SocketException e) { + throw new IOException("failed to check if interface [" + intf.getName() + "] is up", e); + } + } + + /** Returns all interface-local scope (loopback) addresses for interfaces that are up. */ + static InetAddress[] getLoopbackAddresses() throws IOException { + return filterAllAddresses(InetAddress::isLoopbackAddress, "no up-and-running loopback addresses found"); + } + + /** Returns all site-local scope (private) addresses for interfaces that are up. 
*/ + static InetAddress[] getSiteLocalAddresses() throws IOException { + return filterAllAddresses(InetAddress::isSiteLocalAddress, "No up-and-running site-local (private) addresses found"); + } + + /** Returns all global scope addresses for interfaces that are up. */ + static InetAddress[] getGlobalAddresses() throws IOException { + return filterAllAddresses( + address -> address.isLoopbackAddress() == false + && address.isSiteLocalAddress() == false + && address.isLinkLocalAddress() == false, + "no up-and-running global-scope (public) addresses found"); + } + + /** Returns all addresses (any scope) for interfaces that are up. + * This is only used to pick a publish address, when the user set network.host to a wildcard */ + static InetAddress[] getAllAddresses() throws IOException { + return filterAllAddresses(address -> true, "no up-and-running addresses found"); } /** Returns addresses for the given interface (it must be marked up) */ diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java index 3deb5f19c95..19a453f7e90 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java @@ -34,6 +34,7 @@ public class KeyStoreCli extends LoggingAwareMultiCommand { subcommands.put("add", new AddStringKeyStoreCommand()); subcommands.put("add-file", new AddFileKeyStoreCommand()); subcommands.put("remove", new RemoveSettingKeyStoreCommand()); + subcommands.put("upgrade", new UpgradeKeyStoreCommand()); } public static void main(String[] args) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index 2ae90a868af..64cdd7165f2 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -100,7 +100,7 @@ public class KeyStoreWrapper implements SecureSettings { private static final String KEYSTORE_FILENAME = "elasticsearch.keystore"; /** The version of the metadata written before the keystore data. */ - private static final int FORMAT_VERSION = 4; + static final int FORMAT_VERSION = 4; /** The oldest metadata format version that can be read. */ private static final int MIN_FORMAT_VERSION = 1; diff --git a/server/src/main/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommand.java b/server/src/main/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommand.java new file mode 100644 index 00000000000..6338f40ea05 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommand.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import joptsimple.OptionSet; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.env.Environment; + +/** + * A sub-command for the keystore CLI that enables upgrading the keystore format. + */ +public class UpgradeKeyStoreCommand extends EnvironmentAwareCommand { + + UpgradeKeyStoreCommand() { + super("Upgrade the keystore format"); + } + + @Override + protected void execute(final Terminal terminal, final OptionSet options, final Environment env) throws Exception { + final KeyStoreWrapper wrapper = KeyStoreWrapper.load(env.configFile()); + if (wrapper == null) { + throw new UserException( + ExitCodes.CONFIG, + "keystore does not exist at [" + KeyStoreWrapper.keystorePath(env.configFile()) + "]"); + } + wrapper.decrypt(new char[0]); + KeyStoreWrapper.upgrade(wrapper, env.configFile(), new char[0]); + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 027d360153f..330681e2624 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -110,7 +110,7 @@ public class DateFormatters { .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) .optionalStart() - .appendFraction(NANO_OF_SECOND, 3, 9, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .optionalEnd() .optionalEnd() .optionalStart() @@ -178,7 +178,7 @@ public class DateFormatters { /** * Returns a ISO 8601 compatible date time formatter and parser. 
* This is not fully compatible to the existing spec, which would require far more edge cases, but merely compatible with the - * existing joda time ISO data formater + * existing joda time ISO date formatter */ private static final DateFormatter ISO_8601 = new JavaDateFormatter("iso8601", STRICT_DATE_OPTIONAL_TIME_PRINTER, new DateTimeFormatterBuilder() @@ -201,6 +201,8 @@ public class DateFormatters { .appendFraction(NANO_OF_SECOND, 1, 9, false) .optionalEnd() .optionalEnd() + .optionalEnd() + .optionalEnd() .optionalStart() .appendZoneOrOffsetId() .optionalEnd() @@ -208,8 +210,6 @@ public class DateFormatters { .append(TIME_ZONE_FORMATTER_NO_COLON) .optionalEnd() .optionalEnd() - .optionalEnd() - .optionalEnd() .toFormatter(Locale.ROOT)); ///////////////////////////////////////// diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java index 14d1ad5b646..ac917e72b2d 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutor.java @@ -148,11 +148,17 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto assert super.unwrap(r) instanceof TimedRunnable : "expected only TimedRunnables in queue"; final TimedRunnable timedRunnable = (TimedRunnable) super.unwrap(r); final long taskNanos = timedRunnable.getTotalNanos(); + final boolean failedOrRejected = timedRunnable.getFailedOrRejected(); final long totalNanos = totalTaskNanos.addAndGet(taskNanos); final long taskExecutionNanos = timedRunnable.getTotalExecutionNanos(); - assert taskExecutionNanos >= 0 : "expected task to always take longer than 0 nanoseconds, got: " + taskExecutionNanos; - executionEWMA.addValue(taskExecutionNanos); + assert taskExecutionNanos >= 0 || (failedOrRejected && taskExecutionNanos == -1) : + "expected task to always take longer than 0 nanoseconds or have '-1' failure code, got: " + taskExecutionNanos + + ", failedOrRejected: " + failedOrRejected; + if (taskExecutionNanos != -1) { + // taskExecutionNanos may be -1 if the task threw an exception + executionEWMA.addValue(taskExecutionNanos); + } if (taskCount.incrementAndGet() == this.tasksPerFrame) { final long endTimeNs = System.nanoTime(); diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/TimedRunnable.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/TimedRunnable.java index b6b9ef1ad05..f2de68453a6 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/TimedRunnable.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/TimedRunnable.java @@ -30,6 +30,7 @@ class TimedRunnable extends AbstractRunnable implements WrappedRunnable { private final long creationTimeNanos; private long startTimeNanos; private long finishTimeNanos = -1; + private boolean failedOrRejected = false; TimedRunnable(final Runnable original) { this.original = original; @@ -48,6 +49,7 @@ class TimedRunnable extends AbstractRunnable implements WrappedRunnable { @Override public void onRejection(final Exception e) { + this.failedOrRejected = true; if (original instanceof AbstractRunnable) { ((AbstractRunnable) original).onRejection(e); } else { @@ -64,6 +66,7 @@ class TimedRunnable extends AbstractRunnable implements WrappedRunnable { @Override public void onFailure(final 
Exception e) { + this.failedOrRejected = true; if (original instanceof AbstractRunnable) { ((AbstractRunnable) original).onFailure(e); } else { @@ -100,6 +103,14 @@ class TimedRunnable extends AbstractRunnable implements WrappedRunnable { return Math.max(finishTimeNanos - startTimeNanos, 1); } + /** + * If the task was failed or rejected, return true. + * Otherwise, false. + */ + boolean getFailedOrRejected() { + return this.failedOrRejected; + } + @Override public Runnable unwrap() { return original; diff --git a/server/src/main/java/org/elasticsearch/discovery/FileBasedSeedHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/FileBasedSeedHostsProvider.java index 3af83e36311..8e0192f58e7 100644 --- a/server/src/main/java/org/elasticsearch/discovery/FileBasedSeedHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/FileBasedSeedHostsProvider.java @@ -75,7 +75,7 @@ public class FileBasedSeedHostsProvider implements SeedHostsProvider { @Override public List getSeedAddresses(HostsResolver hostsResolver) { - final List transportAddresses = hostsResolver.resolveHosts(getHostsList(), 1); + final List transportAddresses = hostsResolver.resolveHosts(getHostsList()); logger.debug("seed addresses: {}", transportAddresses); return transportAddresses; } diff --git a/server/src/main/java/org/elasticsearch/discovery/SeedHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/SeedHostsProvider.java index 12eb11e3686..4811d13d2d9 100644 --- a/server/src/main/java/org/elasticsearch/discovery/SeedHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/SeedHostsProvider.java @@ -36,10 +36,9 @@ public interface SeedHostsProvider { /** * Helper object that allows to resolve a list of hosts to a list of transport addresses. 
- * Each host is resolved into a transport address (or a collection of addresses if the - * number of ports is greater than one) + * Each host is resolved into a transport address */ interface HostsResolver { - List resolveHosts(List hosts, int limitPortCounts); + List resolveHosts(List hosts); } } diff --git a/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java b/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java index 926216b9b68..61a0b213d63 100644 --- a/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java +++ b/server/src/main/java/org/elasticsearch/discovery/SeedHostsResolver.java @@ -116,7 +116,6 @@ public class SeedHostsResolver extends AbstractLifecycleComponent implements Con * @param executorService the executor service used to parallelize hostname lookups * @param logger logger used for logging messages regarding hostname lookups * @param hosts the hosts to resolve - * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport) * @param transportService the transport service * @param resolveTimeout the timeout before returning from hostname lookups * @return a list of resolved transport addresses @@ -125,7 +124,6 @@ public class SeedHostsResolver extends AbstractLifecycleComponent implements Con final ExecutorService executorService, final Logger logger, final List hosts, - final int limitPortCounts, final TransportService transportService, final TimeValue resolveTimeout) { Objects.requireNonNull(executorService); @@ -140,7 +138,7 @@ public class SeedHostsResolver extends AbstractLifecycleComponent implements Con final List> callables = hosts .stream() - .map(hn -> (Callable) () -> transportService.addressesFromString(hn, limitPortCounts)) + .map(hn -> (Callable) () -> transportService.addressesFromString(hn)) .collect(Collectors.toList()); final List> futures; try { @@ -224,9 +222,8 @@ public class SeedHostsResolver extends AbstractLifecycleComponent implements Con } List providedAddresses - = hostsProvider.getSeedAddresses((hosts, limitPortCounts) - -> resolveHostsLists(executorService.get(), logger, hosts, limitPortCounts, - transportService, resolveTimeout)); + = hostsProvider.getSeedAddresses(hosts -> + resolveHostsLists(executorService.get(), logger, hosts, transportService, resolveTimeout)); consumer.accept(providedAddresses); } diff --git a/server/src/main/java/org/elasticsearch/discovery/SettingsBasedSeedHostsProvider.java b/server/src/main/java/org/elasticsearch/discovery/SettingsBasedSeedHostsProvider.java index d0c8a5c65df..676577ebb4f 100644 --- a/server/src/main/java/org/elasticsearch/discovery/SettingsBasedSeedHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/SettingsBasedSeedHostsProvider.java @@ -50,12 +50,7 @@ public class SettingsBasedSeedHostsProvider implements SeedHostsProvider { public static final Setting> DISCOVERY_SEED_HOSTS_SETTING = Setting.listSetting("discovery.seed_hosts", emptyList(), Function.identity(), Property.NodeScope); - // these limits are per-address - private static final int LIMIT_FOREIGN_PORTS_COUNT = 1; - private static final int LIMIT_LOCAL_PORTS_COUNT = 5; - private final List configuredHosts; - private final int limitPortCounts; public SettingsBasedSeedHostsProvider(Settings settings, TransportService transportService) { if (LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.exists(settings)) { @@ -66,15 +61,11 @@ public class SettingsBasedSeedHostsProvider implements SeedHostsProvider { } configuredHosts = 
LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings); // we only limit to 1 address, makes no sense to ping 100 ports - limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT; } else if (DISCOVERY_SEED_HOSTS_SETTING.exists(settings)) { configuredHosts = DISCOVERY_SEED_HOSTS_SETTING.get(settings); - // we only limit to 1 address, makes no sense to ping 100 ports - limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT; } else { // if unicast hosts are not specified, fill with simple defaults on the local machine - configuredHosts = transportService.getLocalAddresses(); - limitPortCounts = LIMIT_LOCAL_PORTS_COUNT; + configuredHosts = transportService.getDefaultSeedAddresses(); } logger.debug("using initial hosts {}", configuredHosts); @@ -82,6 +73,6 @@ public class SettingsBasedSeedHostsProvider implements SeedHostsProvider { @Override public List getSeedAddresses(HostsResolver hostsResolver) { - return hostsResolver.resolveHosts(configuredHosts, limitPortCounts); + return hostsResolver.resolveHosts(configuredHosts); } } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index d6b6f00311b..f918e254f80 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -144,8 +144,7 @@ public class UnicastZenPing implements ZenPing { } private SeedHostsProvider.HostsResolver createHostsResolver() { - return (hosts, limitPortCounts) -> SeedHostsResolver.resolveHostsLists(unicastZenPingExecutorService, logger, hosts, - limitPortCounts, transportService, resolveTimeout); + return hosts -> SeedHostsResolver.resolveHostsLists(unicastZenPingExecutorService, logger, hosts, transportService, resolveTimeout); } @Override diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index b79a5e77309..ab6168cad52 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -249,7 +249,7 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo return publishPort; } - protected void onException(HttpChannel channel, Exception e) { + public void onException(HttpChannel channel, Exception e) { if (lifecycle.started() == false) { // just close and ignore - we are already stopped and just need to make sure we release all resources CloseableChannel.closeChannel(channel); @@ -263,6 +263,9 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo logger.trace(() -> new ParameterizedMessage( "connect exception caught while handling client http traffic, closing connection {}", channel), e); CloseableChannel.closeChannel(channel); + } else if (e instanceof HttpReadTimeoutException) { + logger.trace(() -> new ParameterizedMessage("http read timeout, closing connection {}", channel), e); + CloseableChannel.closeChannel(channel); } else if (e instanceof CancelledKeyException) { logger.trace(() -> new ParameterizedMessage( "cancelled key exception caught while handling client http traffic, closing connection {}", channel), e); diff --git a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java index 568f2912a67..805ebc3d95d 100644 --- 
a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java @@ -29,6 +29,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUN import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; @@ -43,11 +44,12 @@ public class HttpHandlingSettings { private final int compressionLevel; private final boolean detailedErrorsEnabled; private final int pipeliningMaxEvents; + private final long readTimeoutMillis; private boolean corsEnabled; public HttpHandlingSettings(int maxContentLength, int maxChunkSize, int maxHeaderSize, int maxInitialLineLength, boolean resetCookies, boolean compression, int compressionLevel, boolean detailedErrorsEnabled, - int pipeliningMaxEvents, boolean corsEnabled) { + int pipeliningMaxEvents, long readTimeoutMillis, boolean corsEnabled) { this.maxContentLength = maxContentLength; this.maxChunkSize = maxChunkSize; this.maxHeaderSize = maxHeaderSize; @@ -57,6 +59,7 @@ public class HttpHandlingSettings { this.compressionLevel = compressionLevel; this.detailedErrorsEnabled = detailedErrorsEnabled; this.pipeliningMaxEvents = pipeliningMaxEvents; + this.readTimeoutMillis = readTimeoutMillis; this.corsEnabled = corsEnabled; } @@ -70,6 +73,7 @@ public class HttpHandlingSettings { SETTING_HTTP_COMPRESSION_LEVEL.get(settings), SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), SETTING_PIPELINING_MAX_EVENTS.get(settings), + SETTING_HTTP_READ_TIMEOUT.get(settings).getMillis(), SETTING_CORS_ENABLED.get(settings)); } @@ -109,6 +113,10 @@ public class HttpHandlingSettings { return pipeliningMaxEvents; } + public long getReadTimeoutMillis() { + return readTimeoutMillis; + } + public boolean isCorsEnabled() { return corsEnabled; } diff --git a/server/src/main/java/org/elasticsearch/http/HttpReadTimeoutException.java b/server/src/main/java/org/elasticsearch/http/HttpReadTimeoutException.java new file mode 100644 index 00000000000..a4f54e92f34 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/HttpReadTimeoutException.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.http; + +public class HttpReadTimeoutException extends RuntimeException { + + public HttpReadTimeoutException(long readTimeoutMillis) { + super("http read timeout after " + readTimeoutMillis + "ms"); + + } + + public HttpReadTimeoutException(long readTimeoutMillis, Exception cause) { + super("http read timeout after " + readTimeoutMillis + "ms", cause); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index f0cafe2e0f0..38fea26facc 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -2568,6 +2568,10 @@ public class InternalEngine extends Engine { @Override public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException { final long currentLocalCheckpoint = getLocalCheckpointTracker().getCheckpoint(); + // avoid scanning translog if not necessary + if (startingSeqNo > currentLocalCheckpoint) { + return true; + } final LocalCheckpointTracker tracker = new LocalCheckpointTracker(startingSeqNo, startingSeqNo - 1); try (Translog.Snapshot snapshot = getTranslog().newSnapshotFromMinSeqNo(startingSeqNo)) { Translog.Operation operation; diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index 5cf62dbbf7c..5acac256dbd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -137,7 +137,7 @@ public class ReadOnlyEngine extends Engine { + "] from last commit does not match global checkpoint [" + seqNoStats.getGlobalCheckpoint() + "]"); } } - assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getMaxSeqNo()); + assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint()); } protected boolean assertMaxSeqNoEqualsToGlobalCheckpoint(final long maxSeqNo, final long globalCheckpoint) { @@ -300,7 +300,8 @@ public class ReadOnlyEngine extends Engine { @Override public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException { - return false; + // we can do operation-based recovery if we don't have to replay any operation. + return startingSeqNo > seqNoStats.getMaxSeqNo(); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java index 712d7019674..875abaed4bd 100644 --- a/server/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/WrapperQueryBuilder.java @@ -45,8 +45,8 @@ import java.util.Arrays; *
 * {@code
  *      BoolQueryBuilder bool = new BoolQueryBuilder();
- *      bool.must(new WrapperQueryBuilder("{\"term\": {\"field\":\"value\"}}");
- *      bool.must(new TermQueryBuilder("field2","value2");
+ *      bool.must(new WrapperQueryBuilder("{\"term\": {\"field\":\"value\"}}"));
+ *      bool.must(new TermQueryBuilder("field2","value2"));
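 *      // editorial, hypothetical extra clause: raw JSON strings combine freely with typed builders
 *      bool.filter(new WrapperQueryBuilder("{\"range\": {\"age\": {\"gte\": 10}}}"));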
 * }
 * </pre>
*/ diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index e71572b3474..ee67597efe3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -777,11 +777,25 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl try { if (logger.isTraceEnabled()) { // don't use index.source().utf8ToString() here source might not be valid UTF-8 - logger.trace("index [{}][{}] seq# [{}] allocation-id {}", - index.type(), index.id(), index.seqNo(), routingEntry().allocationId()); + logger.trace("index [{}][{}] seq# [{}] allocation-id [{}] primaryTerm [{}] operationPrimaryTerm [{}] origin [{}]", + index.type(), index.id(), index.seqNo(), routingEntry().allocationId(), index.primaryTerm(), getOperationPrimaryTerm(), + index.origin()); } result = engine.index(index); + if (logger.isTraceEnabled()) { + logger.trace("index-done [{}][{}] seq# [{}] allocation-id [{}] primaryTerm [{}] operationPrimaryTerm [{}] origin [{}] " + + "result-seq# [{}] result-term [{}] failure [{}]", + index.type(), index.id(), index.seqNo(), routingEntry().allocationId(), index.primaryTerm(), getOperationPrimaryTerm(), + index.origin(), result.getSeqNo(), result.getTerm(), result.getFailure()); + } } catch (Exception e) { + if (logger.isTraceEnabled()) { + logger.trace(new ParameterizedMessage( + "index-fail [{}][{}] seq# [{}] allocation-id [{}] primaryTerm [{}] operationPrimaryTerm [{}] origin [{}]", + index.type(), index.id(), index.seqNo(), routingEntry().allocationId(), index.primaryTerm(), getOperationPrimaryTerm(), + index.origin() + ), e); + } indexingOperationListeners.postIndex(shardId, index, e); throw e; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 5c1120452f6..7f296daa5d1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -75,7 +75,7 @@ public class AggregatorFactories { final String aggregationName = parser.currentName(); if (!validAggMatcher.reset(aggregationName).matches()) { throw new ParsingException(parser.getTokenLocation(), "Invalid aggregation name [" + aggregationName - + "]. Aggregation names must be alpha-numeric and can only contain '_' and '-'"); + + "]. 
Aggregation names can contain any character except '[', ']', and '>'"); } token = parser.nextToken(); diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 8f933b28d94..82d3528695c 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -282,6 +282,10 @@ public class QueryPhase implements SearchPhase { } finally { searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION); } + if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER + && queryResult.terminatedEarly() == null) { + queryResult.terminatedEarly(false); + } final QuerySearchResult result = searchContext.queryResult(); for (QueryCollectorContext ctx : collectors) { diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index df97d2e548e..df3e1da5dce 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -237,7 +237,17 @@ public class ThreadPool implements Scheduler, Closeable { * timestamp, see {@link #absoluteTimeInMillis()}. */ public long relativeTimeInMillis() { - return cachedTimeThread.relativeTimeInMillis(); + return TimeValue.nsecToMSec(relativeTimeInNanos()); + } + + /** + * Returns a value of nanoseconds that may be used for relative time calculations. + * + * This method should only be used for calculating time deltas. For an epoch based + * timestamp, see {@link #absoluteTimeInMillis()}. + */ + public long relativeTimeInNanos() { + return cachedTimeThread.relativeTimeInNanos(); } /** @@ -534,30 +544,29 @@ public class ThreadPool implements Scheduler, Closeable { final long interval; volatile boolean running = true; - volatile long relativeMillis; + volatile long relativeNanos; volatile long absoluteMillis; CachedTimeThread(String name, long interval) { super(name); this.interval = interval; - this.relativeMillis = TimeValue.nsecToMSec(System.nanoTime()); + this.relativeNanos = System.nanoTime(); this.absoluteMillis = System.currentTimeMillis(); setDaemon(true); } /** - * Return the current time used for relative calculations. This is - * {@link System#nanoTime()} truncated to milliseconds. + * Return the current time used for relative calculations. This is {@link System#nanoTime()}. *
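(Editorial aside, not part of the patch: the nanosecond clock is for deltas only. A minimal sketch, assuming an org.elasticsearch.threadpool.ThreadPool instance named threadPool and an arbitrary runTask(); TimeValue.nsecToMSec is the same conversion this change uses.)

long startNanos = threadPool.relativeTimeInNanos(); // monotonic, cached per interval
runTask();
// convert only the delta back to milliseconds; the absolute value has no epoch meaning
long elapsedMillis = TimeValue.nsecToMSec(threadPool.relativeTimeInNanos() - startNanos);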

* If {@link ThreadPool#ESTIMATED_TIME_INTERVAL_SETTING} is set to 0 * then the cache is disabled and the method calls {@link System#nanoTime()} * whenever called. Typically used for testing. */ - long relativeTimeInMillis() { + long relativeTimeInNanos() { if (0 < interval) { - return relativeMillis; + return relativeNanos; } - return TimeValue.nsecToMSec(System.nanoTime()); + return System.nanoTime(); } /** @@ -578,7 +587,7 @@ public class ThreadPool implements Scheduler, Closeable { @Override public void run() { while (running && 0 < interval) { - relativeMillis = TimeValue.nsecToMSec(System.nanoTime()); + relativeNanos = System.nanoTime(); absoluteMillis = System.currentTimeMillis(); try { Thread.sleep(interval); diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 42d61301635..eef9f4f4263 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -86,6 +86,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException; @@ -102,6 +103,9 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); private static final BytesReference EMPTY_BYTES_REFERENCE = new BytesArray(new byte[0]); + // this limit is per-address + private static final int LIMIT_LOCAL_PORTS_COUNT = 6; + protected final Settings settings; protected final ThreadPool threadPool; protected final PageCacheRecycler pageCacheRecycler; @@ -311,14 +315,20 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements } @Override - public List getLocalAddresses() { + public List getDefaultSeedAddresses() { List local = new ArrayList<>(); local.add("127.0.0.1"); // check if v6 is supported, if so, v4 will also work via mapped addresses. if (NetworkUtils.SUPPORTS_V6) { local.add("[::1]"); // may get ports appended! 
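(Editorial aside, not part of the patch: a runnable, JDK-only sketch of what the stream below produces, assuming the default port range starts at 9300 and the per-address limit of 6.)

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;

public class SeedExpansionSketch {
    public static void main(String[] args) {
        // expand each local address across the first six ports of the default range,
        // mirroring LIMIT_LOCAL_PORTS_COUNT in this patch: 9300..9305 per address
        List<String> seeds = Stream.of("127.0.0.1", "[::1]")
            .flatMap(address -> IntStream.rangeClosed(9300, 9305).mapToObj(port -> address + ":" + port))
            .collect(Collectors.toList());
        System.out.println(seeds);
    }
}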
} - return local; + return local.stream() + .flatMap( + address -> Arrays.stream(defaultPortRange()) + .limit(LIMIT_LOCAL_PORTS_COUNT) + .mapToObj(port -> address + ":" + port) + ) + .collect(Collectors.toList()); } protected void bindServer(ProfileSettings profileSettings) { @@ -456,8 +466,17 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { - return parse(address, settings.get("transport.profiles.default.port", TransportSettings.PORT.get(settings)), perAddressLimit); + public TransportAddress[] addressesFromString(String address) throws UnknownHostException { + return parse(address, defaultPortRange()[0]); + } + + private int[] defaultPortRange() { + return new PortsRange( + settings.get( + TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(TransportSettings.DEFAULT_PROFILE).getKey(), + TransportSettings.PORT.get(settings) + ) + ).ports(); } // this code is a take on guava's HostAndPort, like a HostAndPortRange @@ -467,9 +486,9 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private static final Pattern BRACKET_PATTERN = Pattern.compile("^\\[(.*:.*)\\](?::([\\d\\-]*))?$"); /** - * parse a hostname+port range spec into its equivalent addresses + * parse a hostname+port spec into its equivalent addresses */ - static TransportAddress[] parse(String hostPortString, String defaultPortRange, int perAddressLimit) throws UnknownHostException { + static TransportAddress[] parse(String hostPortString, int defaultPort) throws UnknownHostException { Objects.requireNonNull(hostPortString); String host; String portString = null; @@ -498,22 +517,18 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements } } + int port; // if port isn't specified, fill with the default if (portString == null || portString.isEmpty()) { - portString = defaultPortRange; + port = defaultPort; + } else { + port = Integer.parseInt(portString); } - // generate address for each port in the range - Set addresses = new HashSet<>(Arrays.asList(InetAddress.getAllByName(host))); - List transportAddresses = new ArrayList<>(); - int[] ports = new PortsRange(portString).ports(); - int limit = Math.min(ports.length, perAddressLimit); - for (int i = 0; i < limit; i++) { - for (InetAddress address : addresses) { - transportAddresses.add(new TransportAddress(address, ports[i])); - } - } - return transportAddresses.toArray(new TransportAddress[transportAddresses.size()]); + return Arrays.stream(InetAddress.getAllByName(host)) + .distinct() + .map(address -> new TransportAddress(address, port)) + .toArray(TransportAddress[]::new); } @Override diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java index eea8ce0f2ff..0b79b6aecf0 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transport.java +++ b/server/src/main/java/org/elasticsearch/transport/Transport.java @@ -68,12 +68,12 @@ public interface Transport extends LifecycleComponent { /** * Returns an address from its string representation. 
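(Editorial aside, not part of the patch: a JDK-only sketch of the simplified single-port contract, assuming an unspecified port falls back to the first port of the default range; bracketed IPv6 specs are omitted for brevity.)

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Arrays;

public class ParseSketch {
    public static void main(String[] args) throws UnknownHostException {
        // "host[:port]" now resolves to exactly one port; the old per-address port fan-out is gone
        String spec = "localhost:9301";
        int colon = spec.lastIndexOf(':');
        String host = colon < 0 ? spec : spec.substring(0, colon);
        int port = colon < 0 ? 9300 : Integer.parseInt(spec.substring(colon + 1));
        Arrays.stream(InetAddress.getAllByName(host))
              .distinct()
              .forEach(address -> System.out.println(address.getHostAddress() + ":" + port));
    }
}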
*/ - TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException; + TransportAddress[] addressesFromString(String address) throws UnknownHostException; /** - * Returns a list of all local adresses for this transport + * Returns a list of all local addresses for this transport */ - List getLocalAddresses(); + List getDefaultSeedAddresses(); default CircuitBreaker getInFlightRequestBreaker() { return new NoopCircuitBreaker("in-flight-noop"); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index c8493edc979..90fd7c1847b 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -313,8 +313,8 @@ public class TransportService extends AbstractLifecycleComponent implements Tran return transport.boundAddress(); } - public List getLocalAddresses() { - return transport.getLocalAddresses(); + public List getDefaultSeedAddresses() { + return transport.getDefaultSeedAddresses(); } /** @@ -750,8 +750,8 @@ public class TransportService extends AbstractLifecycleComponent implements Tran return true; } - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { - return transport.addressesFromString(address, perAddressLimit); + public TransportAddress[] addressesFromString(String address) throws UnknownHostException { + return transport.addressesFromString(address); } /** diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index d6b15f3df43..b195662dcf2 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -89,6 +89,11 @@ grant codeBase "${codebase.httpasyncclient}" { permission java.net.NetPermission "getProxySelector"; }; +grant codeBase "${codebase.junit-rt.jar}" { + // allows IntelliJ IDEA JUnit test runner to control number of test iterations + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; +}; + grant codeBase "file:${gradle.dist.lib}/-" { // gradle test worker code needs a slew of permissions, we give full access here since gradle isn't a production // dependency and there's no point in exercising the security policy against it @@ -104,4 +109,4 @@ grant codeBase "file:${gradle.worker.jar}" { grant { // since the gradle test worker jar is on the test classpath, our tests should be able to read it permission java.io.FilePermission "${gradle.worker.jar}", "read"; -}; \ No newline at end of file +}; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java index 63cfc5da43b..81ff1e36c0b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.delete; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import 
org.elasticsearch.common.settings.Settings; @@ -63,7 +64,22 @@ public class DeleteIndexBlocksIT extends ESIntegTestCase { } } - public void testDeleteIndexOnReadOnlyAllowDeleteSetting() { + public void testClusterBlockMessageHasIndexName() { + try { + createIndex("test"); + ensureGreen("test"); + Settings settings = Settings.builder().put(IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE, true).build(); + client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get(); + ClusterBlockException e = expectThrows(ClusterBlockException.class, () -> + client().prepareIndex().setIndex("test").setType("doc").setId("1").setSource("foo", "bar").get()); + assertEquals("index [test] blocked by: [FORBIDDEN/12/index read-only / allow delete (api)];", e.getMessage()); + } finally { + assertAcked(client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().putNull(IndexMetaData.SETTING_READ_ONLY_ALLOW_DELETE).build()).get()); + } + } + + public void testDeleteIndexOnClusterReadOnlyAllowDeleteSetting() { createIndex("test"); ensureGreen("test"); client().prepareIndex().setIndex("test").setType("doc").setId("1").setSource("foo", "bar").get(); diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 9e7a28a8792..8aca4d7105e 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -287,7 +287,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase { action.new AsyncAction(null, request, listener).start(); fail("expected ClusterBlockException"); } catch (ClusterBlockException expected) { - assertEquals("blocked by: [SERVICE_UNAVAILABLE/1/test-block];", expected.getMessage()); + assertEquals("index [" + TEST_INDEX + "] blocked by: [SERVICE_UNAVAILABLE/1/test-block];", expected.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index 9a74282d51f..a636dc8471d 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ b/server/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -170,7 +170,7 @@ abstract class FailAndRetryMockTransport imp } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + public TransportAddress[] addressesFromString(String address) throws UnknownHostException { throw new UnsupportedOperationException(); } diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java index bdcaf80ee19..9e13dbaa89b 100644 --- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java @@ -128,7 +128,7 @@ public class TransportClientNodesServiceTests extends ESTestCase { threadPool = new TestThreadPool("transport-client-nodes-service-tests"); transport = new FailAndRetryMockTransport(random(), clusterName) { 
@Override - public List getLocalAddresses() { + public List getDefaultSeedAddresses() { return Collections.emptyList(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 3138fb19b6c..86d3eec9c9c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -21,11 +21,13 @@ package org.elasticsearch.cluster; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class ClusterStateTests extends ESTestCase { @@ -56,4 +58,19 @@ public class ClusterStateTests extends ESTestCase { // state from the same master compare by version assertThat(withMaster1a.supersedes(withMaster1b), equalTo(withMaster1a.version() > withMaster1b.version())); } + + public void testBuilderRejectsNullCustom() { + final ClusterState.Builder builder = ClusterState.builder(ClusterName.DEFAULT); + final String key = randomAlphaOfLength(10); + assertThat(expectThrows(NullPointerException.class, () -> builder.putCustom(key, null)).getMessage(), containsString(key)); + } + + public void testBuilderRejectsNullInCustoms() { + final ClusterState.Builder builder = ClusterState.builder(ClusterName.DEFAULT); + final String key = randomAlphaOfLength(10); + final ImmutableOpenMap.Builder mapBuilder = ImmutableOpenMap.builder(); + mapBuilder.put(key, null); + final ImmutableOpenMap map = mapBuilder.build(); + assertThat(expectThrows(NullPointerException.class, () -> builder.customs(map)).getMessage(), containsString(key)); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index 25179427d86..193cde3180d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -401,7 +401,7 @@ public class NodeConnectionsServiceTests extends ESTestCase { } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) { + public TransportAddress[] addressesFromString(String address) { return new TransportAddress[0]; } @@ -440,7 +440,7 @@ public class NodeConnectionsServiceTests extends ESTestCase { } @Override - public List getLocalAddresses() { + public List getDefaultSeedAddresses() { return null; } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 101b5755b4c..3667e5c9762 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -26,6 +26,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; 
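(Editorial aside, not part of the patch: the null-rejection idiom the new builder tests exercise, with the offending key named in the message; all names here are illustrative.)

import java.util.Objects;

public class CustomsSketch {
    // reject a null custom early, naming the offending key in the exception message
    static <T> T putCustom(String key, T custom) {
        return Objects.requireNonNull(custom, "custom value for key [" + key + "] must not be null");
    }

    public static void main(String[] args) {
        System.out.println(putCustom("snapshots", "ok"));
        // putCustom("broken", null) would throw a NullPointerException whose message names [broken]
    }
}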
import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskListener; @@ -54,6 +55,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -63,6 +65,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.SeedHostsProvider.HostsResolver; import org.elasticsearch.discovery.zen.PublishClusterStateStats; @@ -94,6 +97,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -136,6 +140,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -1150,6 +1155,67 @@ public class CoordinatorTests extends ESTestCase { cluster.stabilise(); } + private static class BrokenCustom extends AbstractDiffable implements ClusterState.Custom { + + static final String EXCEPTION_MESSAGE = "simulated"; + + @Override + public String getWriteableName() { + return "broken"; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_EMPTY; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new ElasticsearchException(EXCEPTION_MESSAGE); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } + } + + public void testClusterRecoversAfterExceptionDuringSerialization() { + final Cluster cluster = new Cluster(randomIntBetween(2, 5)); // 1-node cluster doesn't do any serialization + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode leader1 = cluster.getAnyLeader(); + + logger.info("--> submitting broken task to [{}]", leader1); + + final AtomicBoolean failed = new AtomicBoolean(); + leader1.submitUpdateTask("broken-task", + cs -> ClusterState.builder(cs).putCustom("broken", new BrokenCustom()).build(), + (source, e) -> { + assertThat(e.getCause(), instanceOf(ElasticsearchException.class)); + assertThat(e.getCause().getMessage(), equalTo(BrokenCustom.EXCEPTION_MESSAGE)); + failed.set(true); + }); + cluster.runFor(DEFAULT_DELAY_VARIABILITY + 1, "processing broken task"); + assertTrue(failed.get()); + + cluster.stabilise(); + + final ClusterNode leader2 = cluster.getAnyLeader(); + long finalValue = randomLong(); + + logger.info("--> submitting value [{}] to [{}]", finalValue, leader2); + 
leader2.submitValue(finalValue); + cluster.stabilise(DEFAULT_CLUSTER_STATE_UPDATE_DELAY); + + for (final ClusterNode clusterNode : cluster.clusterNodes) { + final String nodeId = clusterNode.getId(); + final ClusterState appliedState = clusterNode.getLastAppliedClusterState(); + assertThat(nodeId + " has the applied value", value(appliedState), is(finalValue)); + } + } + private static long defaultMillis(Setting setting) { return setting.get(Settings.EMPTY).millis() + Cluster.DEFAULT_DELAY_VARIABILITY; } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java b/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java index 38e93082892..a854d403557 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/LinearizabilityChecker.java @@ -19,7 +19,10 @@ package org.elasticsearch.cluster.coordination; import com.carrotsearch.hppc.LongObjectHashMap; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.FixedBitSet; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import java.util.ArrayList; @@ -32,7 +35,11 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Queue; import java.util.Set; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; import java.util.function.Function; /** @@ -44,6 +51,8 @@ import java.util.function.Function; */ public class LinearizabilityChecker { + private static final Logger logger = LogManager.getLogger(LinearizabilityChecker.class); + /** * Sequential specification of a datatype. Used as input for the linearizability checker. * All parameter and return values should be immutable and have proper equals / hashCode implementations @@ -114,12 +123,17 @@ public class LinearizabilityChecker { * Sequence of invocations and responses, recording the run of a concurrent system. */ public static class History { - private final List events; - private int nextId; + private final Queue events; + private AtomicInteger nextId = new AtomicInteger(); public History() { - events = new ArrayList<>(); - nextId = 0; + events = new ConcurrentLinkedQueue<>(); + } + + public History(Collection events) { + this(); + this.events.addAll(events); + this.nextId.set(events.stream().mapToInt(e -> e.id).max().orElse(-1) + 1); } /** @@ -129,7 +143,7 @@ public class LinearizabilityChecker { * @return an id that can be used to record the corresponding response event */ public int invoke(Object input) { - final int id = nextId++; + final int id = nextId.getAndIncrement(); events.add(new Event(EventType.INVOCATION, input, id)); return id; } @@ -153,6 +167,13 @@ public class LinearizabilityChecker { events.removeIf(e -> e.id == id); } + /** + * Copy the list of events for external use. + * @return list of events in the order recorded. 
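(Editorial aside, not part of the patch: why the event list became a ConcurrentLinkedQueue. This sketch assumes a respond(id, output) counterpart to invoke, matching this class.)

LinearizabilityChecker.History history = new LinearizabilityChecker.History();
Runnable client = () -> {
    int id = history.invoke("write:1"); // invocation event; the id pairs it with the response
    history.respond(id, "ok");          // response event recorded from the same thread
};
// two unsynchronized writers are now safe: events land in a ConcurrentLinkedQueue
new Thread(client).start();
new Thread(client).start();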
+ */ + public List copyEvents() { + return new ArrayList<>(events); + } /** * Completes the history with response events for invocations that are missing corresponding responses * @@ -177,10 +198,7 @@ public class LinearizabilityChecker { @Override public History clone() { - final History history = new History(); - history.events.addAll(events); - history.nextId = nextId; - return history; + return new History(events); } /** @@ -197,6 +215,7 @@ public class LinearizabilityChecker { ", nextId=" + nextId + '}'; } + } /** @@ -210,15 +229,16 @@ public class LinearizabilityChecker { public boolean isLinearizable(SequentialSpec spec, History history, Function missingResponseGenerator) { history = history.clone(); // clone history before completing it history.complete(missingResponseGenerator); // complete history - final Collection> partitions = spec.partition(history.events); + final Collection> partitions = spec.partition(history.copyEvents()); return partitions.stream().allMatch(h -> isLinearizable(spec, h)); } private boolean isLinearizable(SequentialSpec spec, List history) { + logger.debug("Checking history of size: {}: {}", history.size(), history); Object state = spec.initialState(); // the current state of the datatype final FixedBitSet linearized = new FixedBitSet(history.size() / 2); // the linearized prefix of the history - final Cache cache = new Cache(); + final Cache cache = new Cache(); // cache of explored pairs final Deque> calls = new LinkedList<>(); // path we're currently exploring final Entry headEntry = createLinkedEntries(history); @@ -267,6 +287,54 @@ public class LinearizabilityChecker { }); } + /** + * Return a visual representation of the history + */ + public static String visualize(SequentialSpec spec, History history, Function missingResponseGenerator) { + history = history.clone(); + history.complete(missingResponseGenerator); + final Collection> partitions = spec.partition(history.copyEvents()); + StringBuilder builder = new StringBuilder(); + partitions.forEach(new Consumer>() { + int index = 0; + @Override + public void accept(List events) { + builder.append("Partition " ).append(index++).append("\n"); + builder.append(visualizePartition(events)); + } + }); + + return builder.toString(); + } + + private static String visualizePartition(List events) { + StringBuilder builder = new StringBuilder(); + Entry entry = createLinkedEntries(events).next; + Map, Integer> eventToPosition = new HashMap<>(); + for (Event event : events) { + eventToPosition.put(Tuple.tuple(event.type, event.id), eventToPosition.size()); + } + while (entry != null) { + if (entry.match != null) { + builder.append(visualizeEntry(entry, eventToPosition)).append("\n"); + } + entry = entry.next; + } + return builder.toString(); + } + + private static String visualizeEntry(Entry entry, Map, Integer> eventToPosition) { + String input = String.valueOf(entry.event.value); + String output = String.valueOf(entry.match.event.value); + int id = entry.event.id; + int beginIndex = eventToPosition.get(Tuple.tuple(EventType.INVOCATION, id)); + int endIndex = eventToPosition.get(Tuple.tuple(EventType.RESPONSE, id)); + input = input.substring(0, Math.min(beginIndex + 25, input.length())); + return Strings.padStart(input, beginIndex + 25, ' ') + + " " + Strings.padStart("", endIndex-beginIndex, 'X') + " " + + output + " (" + entry.event.id + ")"; + } + /** * Creates the internal linked data structure used by the linearizability checker. 
* Generates contiguous internal ids for the events so that they can be efficiently recorded in bit sets. @@ -314,7 +382,7 @@ public class LinearizabilityChecker { return first; } - enum EventType { + public enum EventType { INVOCATION, RESPONSE } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java index 685b7cca98a..e9893f16f95 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java @@ -50,6 +50,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.startsWith; @@ -891,4 +892,19 @@ public class MetaDataTests extends ESTestCase { .transientSettings(Settings.builder().put(setting.getKey(), "transient-value").build()).build(); assertThat(setting.get(metaData.settings()), equalTo("transient-value")); } + + public void testBuilderRejectsNullCustom() { + final MetaData.Builder builder = MetaData.builder(); + final String key = randomAlphaOfLength(10); + assertThat(expectThrows(NullPointerException.class, () -> builder.putCustom(key, null)).getMessage(), containsString(key)); + } + + public void testBuilderRejectsNullInCustoms() { + final MetaData.Builder builder = MetaData.builder(); + final String key = randomAlphaOfLength(10); + final ImmutableOpenMap.Builder mapBuilder = ImmutableOpenMap.builder(); + mapBuilder.put(key, null); + final ImmutableOpenMap map = mapBuilder.build(); + assertThat(expectThrows(NullPointerException.class, () -> builder.customs(map)).getMessage(), containsString(key)); + } } diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index c3a541fe87e..061d83c9c38 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -733,14 +733,21 @@ public class JavaJodaTimeDuellingTests extends ESTestCase { JodaDateFormatter jodaFormatter = new JodaDateFormatter(format, isoFormatter, isoFormatter); DateFormatter javaFormatter = DateFormatter.forPattern(format); + assertSameDate("2018-10-10", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10+0430", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11-08:00", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11Z", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11:12", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11:12+0100", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11:12.123", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11:12.123Z", format, jodaFormatter, javaFormatter); + assertSameDate("2018-10-10T10:11:12.123+0000", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11:12,123", format, jodaFormatter, javaFormatter); assertSameDate("2018-10-10T10:11:12,123Z", format, jodaFormatter, 
javaFormatter); + assertSameDate("2018-10-10T10:11:12,123+05:30", format, jodaFormatter, javaFormatter); } public void testParsingMissingTimezone() { diff --git a/server/src/test/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommandTests.java b/server/src/test/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommandTests.java new file mode 100644 index 00000000000..ec9a1432539 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/settings/UpgradeKeyStoreCommandTests.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.env.Environment; + +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasToString; + +public class UpgradeKeyStoreCommandTests extends KeyStoreCommandTestCase { + + @Override + protected Command newCommand() { + return new UpgradeKeyStoreCommand() { + + @Override + protected Environment createEnv(final Map settings) { + return env; + } + + }; + } + + public void testKeystoreUpgrade() throws Exception { + final Path keystore = KeyStoreWrapper.keystorePath(env.configFile()); + try (InputStream is = KeyStoreWrapperTests.class.getResourceAsStream("/format-v3-elasticsearch.keystore"); + OutputStream os = Files.newOutputStream(keystore)) { + final byte[] buffer = new byte[4096]; + int read; + while ((read = is.read(buffer, 0, buffer.length)) >= 0) { + os.write(buffer, 0, read); + } + } + try (KeyStoreWrapper beforeUpgrade = KeyStoreWrapper.load(env.configFile())) { + assertNotNull(beforeUpgrade); + assertThat(beforeUpgrade.getFormatVersion(), equalTo(3)); + } + execute(); + try (KeyStoreWrapper afterUpgrade = KeyStoreWrapper.load(env.configFile())) { + assertNotNull(afterUpgrade); + assertThat(afterUpgrade.getFormatVersion(), equalTo(KeyStoreWrapper.FORMAT_VERSION)); + afterUpgrade.decrypt(new char[0]); + assertThat(afterUpgrade.getSettingNames(), hasItem(KeyStoreWrapper.SEED_SETTING.getKey())); + } + } + + public void testKeystoreDoesNotExist() { + final UserException e = expectThrows(UserException.class, this::execute); + assertThat(e, hasToString(containsString("keystore does not exist at [" + KeyStoreWrapper.keystorePath(env.configFile()) + "]"))); + } + +} diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index 3c97d27fe78..8f2a6616643 100644 --- 
a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -198,6 +198,58 @@ public class DateFormattersTests extends ESTestCase { formatter.format(formatter.parse("2018-05-15T17:14:56.123456789+01:00")); } + public void testIso8601Parsing() { + DateFormatter formatter = DateFormatters.forPattern("iso8601"); + + // timezone not allowed with just date + formatter.format(formatter.parse("2018-05-15")); + + formatter.format(formatter.parse("2018-05-15T17")); + formatter.format(formatter.parse("2018-05-15T17Z")); + formatter.format(formatter.parse("2018-05-15T17+0100")); + formatter.format(formatter.parse("2018-05-15T17+01:00")); + + formatter.format(formatter.parse("2018-05-15T17:14")); + formatter.format(formatter.parse("2018-05-15T17:14Z")); + formatter.format(formatter.parse("2018-05-15T17:14-0100")); + formatter.format(formatter.parse("2018-05-15T17:14-01:00")); + + formatter.format(formatter.parse("2018-05-15T17:14:56")); + formatter.format(formatter.parse("2018-05-15T17:14:56Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56+0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56+01:00")); + + // milliseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123-0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123-01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123+0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123+01:00")); + + // microseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123456")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456+0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456+01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456-0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456-01:00")); + + // nanoseconds can be separated using comma or decimal point + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789-0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56.123456789-01:00")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789Z")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789+0100")); + formatter.format(formatter.parse("2018-05-15T17:14:56,123456789+01:00")); + } + public void testRoundupFormatterWithEpochDates() { assertRoundupFormatter("epoch_millis", "1234567890", 1234567890L); // also check nanos of the epoch_millis formatter if it is rounded up to the nano second @@ -260,4 +312,44 @@ public class DateFormattersTests extends ESTestCase { String formatted = formatter.formatMillis(clock.millis()); assertThat(formatted, is("2019-02-08T11:43:00.000Z")); } + + public void testFractionalSeconds() 
{ + DateFormatter formatter = DateFormatters.forPattern("strict_date_optional_time"); + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.1Z")); + assertThat(instant.getNano(), is(100_000_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.12Z")); + assertThat(instant.getNano(), is(120_000_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.123Z")); + assertThat(instant.getNano(), is(123_000_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.1234Z")); + assertThat(instant.getNano(), is(123_400_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.12345Z")); + assertThat(instant.getNano(), is(123_450_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.123456Z")); + assertThat(instant.getNano(), is(123_456_000)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.1234567Z")); + assertThat(instant.getNano(), is(123_456_700)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.12345678Z")); + assertThat(instant.getNano(), is(123_456_780)); + } + { + Instant instant = Instant.from(formatter.parse("2019-05-06T14:52:37.123456789Z")); + assertThat(instant.getNano(), is(123_456_789)); + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java index 8e4c729ee9c..6b5f7d95700 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/QueueResizingEsThreadPoolExecutorTests.java @@ -226,16 +226,43 @@ public class QueueResizingEsThreadPoolExecutorTests extends ESTestCase { context.close(); } + /** Use a runnable wrapper that simulates a task with unknown failures. */ + public void testExceptionThrowingTask() throws Exception { + ThreadContext context = new ThreadContext(Settings.EMPTY); + ResizableBlockingQueue queue = + new ResizableBlockingQueue<>(ConcurrentCollections.newBlockingQueue(), + 100); + + QueueResizingEsThreadPoolExecutor executor = + new QueueResizingEsThreadPoolExecutor( + "test-threadpool", 1, 1, 1000, + TimeUnit.MILLISECONDS, queue, 10, 200, exceptionalWrapper(), 10, TimeValue.timeValueMillis(1), + EsExecutors.daemonThreadFactory("queuetest"), new EsAbortPolicy(), context); + executor.prestartAllCoreThreads(); + logger.info("--> executor: {}", executor); + + assertThat((long)executor.getTaskExecutionEWMA(), equalTo(0L)); + executeTask(executor, 1); + executor.shutdown(); + executor.awaitTermination(10, TimeUnit.SECONDS); + context.close(); + } + private Function fastWrapper() { - return (runnable) -> { - return new SettableTimedRunnable(TimeUnit.NANOSECONDS.toNanos(100)); - }; + return (runnable) -> new SettableTimedRunnable(TimeUnit.NANOSECONDS.toNanos(100), false); } private Function slowWrapper() { - return (runnable) -> { - return new SettableTimedRunnable(TimeUnit.MINUTES.toNanos(2)); - }; + return (runnable) -> new SettableTimedRunnable(TimeUnit.MINUTES.toNanos(2), false); + } + + /** + * The returned function outputs a WrappedRunnabled that simulates the case + * where {@link TimedRunnable#getTotalExecutionNanos()} returns -1 because + * the job failed or was rejected before it finished. 
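(Editorial aside, not part of the patch: the guard this sentinel implies for the executor's EWMA update; executionEWMA and timedRunnable are illustrative stand-ins.)

long taskNanos = timedRunnable.getTotalExecutionNanos();
if (taskNanos != -1 && timedRunnable.getFailedOrRejected() == false) {
    // only successful, completed tasks feed the moving average
    executionEWMA.addValue(taskNanos);
}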
+ */ + private Function exceptionalWrapper() { + return (runnable) -> new SettableTimedRunnable(TimeUnit.NANOSECONDS.toNanos(-1), true); } /** Execute a blank task {@code times} times for the executor */ @@ -248,10 +275,12 @@ public class QueueResizingEsThreadPoolExecutorTests extends ESTestCase { public class SettableTimedRunnable extends TimedRunnable { private final long timeTaken; + private final boolean testFailedOrRejected; - public SettableTimedRunnable(long timeTaken) { + public SettableTimedRunnable(long timeTaken, boolean failedOrRejected) { super(() -> {}); this.timeTaken = timeTaken; + this.testFailedOrRejected = failedOrRejected; } @Override @@ -263,5 +292,10 @@ public class QueueResizingEsThreadPoolExecutorTests extends ESTestCase { public long getTotalExecutionNanos() { return timeTaken; } + + @Override + public boolean getFailedOrRejected() { + return testFailedOrRejected; + } } } diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index f60e108c34c..5e86bd1bcbb 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -66,6 +66,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import java.util.stream.IntStream; import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; @@ -134,6 +135,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase { final List exceptedExceptions = new CopyOnWriteArrayList<>(); final ConflictMode conflictMode = ConflictMode.randomMode(); + final List fieldNames = IntStream.rangeClosed(0, randomInt(10)).mapToObj(n -> "f" + n).collect(Collectors.toList()); logger.info("starting indexers using conflict mode " + conflictMode); try { @@ -156,7 +158,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase { int shard = Math.floorMod(Murmur3HashFunction.hash(id), numPrimaries); logger.trace("[{}] indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard); IndexRequestBuilder indexRequestBuilder = client.prepareIndex("test", "type", id) - .setSource("{}", XContentType.JSON) + .setSource(Collections.singletonMap(randomFrom(fieldNames), randomNonNegativeLong()), XContentType.JSON) .setTimeout(timeout); if (conflictMode == ConflictMode.external) { @@ -459,7 +461,9 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase { while (stopped.get() == false && docID.get() < 5000) { String id = Integer.toString(docID.incrementAndGet()); try { - IndexResponse response = client().prepareIndex(index, "_doc", id).setSource("{}", XContentType.JSON).get(); + IndexResponse response = client().prepareIndex(index, "_doc", id) + .setSource(Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) + .get(); assertThat(response.getResult(), isOneOf(CREATED, UPDATED)); logger.info("--> index id={} seq_no={}", response.getId(), response.getSeqNo()); ackedDocs.add(response.getId()); diff --git a/server/src/test/java/org/elasticsearch/discovery/FileBasedSeedHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/FileBasedSeedHostsProviderTests.java index fc89bd1a2f3..ccd929d5ade 100644 --- 
a/server/src/test/java/org/elasticsearch/discovery/FileBasedSeedHostsProviderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/FileBasedSeedHostsProviderTests.java @@ -115,9 +115,8 @@ public class FileBasedSeedHostsProviderTests extends ESTestCase { public void testUnicastHostsDoesNotExist() { final FileBasedSeedHostsProvider provider = new FileBasedSeedHostsProvider(createTempDir().toAbsolutePath()); - final List addresses = provider.getSeedAddresses((hosts, limitPortCounts) -> - SeedHostsResolver.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, - TimeValue.timeValueSeconds(10))); + final List addresses = provider.getSeedAddresses(hosts -> + SeedHostsResolver.resolveHostsLists(executorService, logger, hosts, transportService, TimeValue.timeValueSeconds(10))); assertEquals(0, addresses.size()); } @@ -145,8 +144,7 @@ public class FileBasedSeedHostsProviderTests extends ESTestCase { writer.write(String.join("\n", hostEntries)); } - return new FileBasedSeedHostsProvider(configPath).getSeedAddresses((hosts, limitPortCounts) -> - SeedHostsResolver.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService, - TimeValue.timeValueSeconds(10))); + return new FileBasedSeedHostsProvider(configPath).getSeedAddresses(hosts -> + SeedHostsResolver.resolveHostsLists(executorService, logger, hosts, transportService, TimeValue.timeValueSeconds(10))); } } diff --git a/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java b/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java index 0506f5c48e8..451548bd743 100644 --- a/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/SeedHostsResolverTests.java @@ -147,47 +147,6 @@ public class SeedHostsResolverTests extends ESTestCase { assertThat(resolvedAddressesRef.get(), equalTo(transportAddresses)); } - public void testPortLimit() { - final NetworkService networkService = new NetworkService(Collections.emptyList()); - final Transport transport = new MockNioTransport( - Settings.EMPTY, - Version.CURRENT, - threadPool, - networkService, - PageCacheRecycler.NON_RECYCLING_INSTANCE, - new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService()) { - - @Override - public BoundTransportAddress boundAddress() { - return new BoundTransportAddress( - new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9500)}, - new TransportAddress(InetAddress.getLoopbackAddress(), 9500) - ); - } - }; - closeables.push(transport); - final TransportService transportService = - new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, - Collections.emptySet()); - closeables.push(transportService); - final int limitPortCounts = randomIntBetween(1, 10); - final List transportAddresses = SeedHostsResolver.resolveHostsLists( - executorService, - logger, - Collections.singletonList("127.0.0.1"), - limitPortCounts, - transportService, - TimeValue.timeValueSeconds(30)); - assertThat(transportAddresses, hasSize(limitPortCounts)); - final Set ports = new HashSet<>(); - for (final TransportAddress address : transportAddresses) { - assertTrue(address.address().getAddress().isLoopbackAddress()); - ports.add(address.getPort()); - } - assertThat(ports, equalTo(IntStream.range(9300, 9300 + limitPortCounts).mapToObj(m -> m).collect(Collectors.toSet()))); - } - public void 
testRemovingLocalAddresses() { final NetworkService networkService = new NetworkService(Collections.emptyList()); final InetAddress loopbackAddress = InetAddress.getLoopbackAddress(); @@ -219,8 +178,9 @@ public class SeedHostsResolverTests extends ESTestCase { final List transportAddresses = SeedHostsResolver.resolveHostsLists( executorService, logger, - Collections.singletonList(NetworkAddress.format(loopbackAddress)), - 10, + IntStream.range(9300, 9310) + .mapToObj(port -> NetworkAddress.format(loopbackAddress) + ":" + port) + .collect(Collectors.toList()), transportService, TimeValue.timeValueSeconds(30)); assertThat(transportAddresses, hasSize(7)); @@ -255,7 +215,7 @@ public class SeedHostsResolverTests extends ESTestCase { } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + public TransportAddress[] addressesFromString(String address) throws UnknownHostException { throw unknownHostException; } @@ -271,7 +231,6 @@ public class SeedHostsResolverTests extends ESTestCase { executorService, logger, Arrays.asList(hostname), - 1, transportService, TimeValue.timeValueSeconds(30) ); @@ -302,7 +261,7 @@ public class SeedHostsResolverTests extends ESTestCase { } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { + public TransportAddress[] addressesFromString(String address) throws UnknownHostException { if ("hostname1".equals(address)) { return new TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)}; } else if ("hostname2".equals(address)) { @@ -330,7 +289,6 @@ public class SeedHostsResolverTests extends ESTestCase { executorService, logger, Arrays.asList("hostname1", "hostname2"), - 1, transportService, resolveTimeout); @@ -373,7 +331,6 @@ public class SeedHostsResolverTests extends ESTestCase { executorService, logger, Arrays.asList("127.0.0.1:9300:9300", "127.0.0.1:9301"), - 1, transportService, TimeValue.timeValueSeconds(30)); assertThat(transportAddresses, hasSize(1)); // only one of the two is valid and will be used diff --git a/server/src/test/java/org/elasticsearch/discovery/SettingsBasedSeedHostsProviderTests.java b/server/src/test/java/org/elasticsearch/discovery/SettingsBasedSeedHostsProviderTests.java index f001d69ecc8..c5f7303bd0d 100644 --- a/server/src/test/java/org/elasticsearch/discovery/SettingsBasedSeedHostsProviderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/SettingsBasedSeedHostsProviderTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.discovery.SeedHostsProvider.HostsResolver; -import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportService; @@ -38,18 +37,15 @@ public class SettingsBasedSeedHostsProviderTests extends ESTestCase { private class AssertingHostsResolver implements HostsResolver { private final Set expectedHosts; - private final int expectedPortCount; private boolean resolvedHosts; - AssertingHostsResolver(int expectedPortCount, String... expectedHosts) { - this.expectedPortCount = expectedPortCount; + AssertingHostsResolver(String... 
expectedHosts) { this.expectedHosts = Sets.newHashSet(expectedHosts); } @Override - public List<TransportAddress> resolveHosts(List<String> hosts, int limitPortCounts) { - assertEquals(expectedPortCount, limitPortCounts); + public List<TransportAddress> resolveHosts(List<String> hosts) { assertEquals(expectedHosts, Sets.newHashSet(hosts)); resolvedHosts = true; return emptyList(); @@ -61,15 +57,19 @@ public class SettingsBasedSeedHostsProviderTests extends ESTestCase { } public void testScansPortsByDefault() { - final AssertingHostsResolver hostsResolver = new AssertingHostsResolver(5, "::1", "127.0.0.1"); + final AssertingHostsResolver hostsResolver = new AssertingHostsResolver( + "[::1]:9300", "[::1]:9301", "127.0.0.1:9300", "127.0.0.1:9301" + ); final TransportService transportService = mock(TransportService.class); - when(transportService.getLocalAddresses()).thenReturn(Arrays.asList("::1", "127.0.0.1")); + when(transportService.getDefaultSeedAddresses()).thenReturn( + Arrays.asList("[::1]:9300", "[::1]:9301", "127.0.0.1:9300", "127.0.0.1:9301") + ); new SettingsBasedSeedHostsProvider(Settings.EMPTY, transportService).getSeedAddresses(hostsResolver); assertTrue(hostsResolver.getResolvedHosts()); } public void testGetsHostsFromSetting() { - final AssertingHostsResolver hostsResolver = new AssertingHostsResolver(1, "bar", "foo"); + final AssertingHostsResolver hostsResolver = new AssertingHostsResolver("bar", "foo"); new SettingsBasedSeedHostsProvider(Settings.builder() .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey(), "foo", "bar") .build(), null).getSeedAddresses(hostsResolver); @@ -77,7 +77,7 @@ public class SettingsBasedSeedHostsProviderTests extends ESTestCase { } public void testGetsHostsFromLegacySetting() { - final AssertingHostsResolver hostsResolver = new AssertingHostsResolver(1, "bar", "foo"); + final AssertingHostsResolver hostsResolver = new AssertingHostsResolver("bar", "foo"); new SettingsBasedSeedHostsProvider(Settings.builder() .putList(SettingsBasedSeedHostsProvider.LEGACY_DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), "foo", "bar") .build(), null).getSeedAddresses(hostsResolver); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index ef2c30fe5c8..5035d7b3378 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -4384,7 +4384,7 @@ public class InternalEngineTests extends EngineTestCase { Randomness.shuffle(seqNos); final EngineConfig engineConfig; final SeqNoStats prevSeqNoStats; - final List<DocIdSeqNoAndTerm> prevDocs; + final List<DocIdSeqNoAndSource> prevDocs; final int totalTranslogOps; try (InternalEngine engine = createEngine(store, createTempDir(), globalCheckpoint::get)) { engineConfig = engine.config(); @@ -5491,7 +5491,7 @@ public class InternalEngineTests extends EngineTestCase { commits.add(new ArrayList<>()); try (Store store = createStore()) { EngineConfig config = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); - final List<DocIdSeqNoAndTerm> docs; + final List<DocIdSeqNoAndSource> docs; try (InternalEngine engine = createEngine(config)) { List<Engine.Operation> flushedOperations = new ArrayList<>(); for (Engine.Operation op : operations) { @@ -5538,7 +5538,7 @@ public class InternalEngineTests extends EngineTestCase { final IndexSettings softDeletesEnabled = IndexSettingsModule.newIndexSettings(
IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(Settings.builder(). put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)).build()); - final List<DocIdSeqNoAndTerm> docs; + final List<DocIdSeqNoAndSource> docs; try (InternalEngine engine = createEngine( config(softDeletesEnabled, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get))) { List<Engine.Operation> ops = generateHistoryOnReplica(between(1, 100), randomBoolean(), randomBoolean(), randomBoolean()); diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java index f179cd840c6..d1840c4d97c 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java @@ -36,6 +36,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -274,7 +275,14 @@ public class LuceneChangesSnapshotTests extends EngineTestCase { pullOperations(engine); } assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService); - assertThat(getDocIds(engine, true), equalTo(getDocIds(leader, true))); + // have to verify without source since we are randomly testing without _source + List<DocIdSeqNoAndSource> docsWithoutSourceOnFollower = getDocIds(engine, true).stream() + .map(d -> new DocIdSeqNoAndSource(d.getId(), null, d.getSeqNo(), d.getPrimaryTerm(), d.getVersion())) + .collect(Collectors.toList()); + List<DocIdSeqNoAndSource> docsWithoutSourceOnLeader = getDocIds(leader, true).stream() + .map(d -> new DocIdSeqNoAndSource(d.getId(), null, d.getSeqNo(), d.getPrimaryTerm(), d.getVersion())) + .collect(Collectors.toList()); + assertThat(docsWithoutSourceOnFollower, equalTo(docsWithoutSourceOnLeader)); } catch (Exception ex) { throw new AssertionError(ex); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java index 024a6c3402f..f9437ac9251 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -44,7 +44,7 @@ public class ReadOnlyEngineTests extends EngineTestCase { EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); int numDocs = scaledRandomIntBetween(10, 1000); final SeqNoStats lastSeqNoStats; - final List<DocIdSeqNoAndTerm> lastDocIds; + final List<DocIdSeqNoAndSource> lastDocIds; try (InternalEngine engine = createEngine(config)) { Engine.Get get = null; for (int i = 0; i < numDocs; i++) { diff --git a/server/src/test/java/org/elasticsearch/index/query/FullTextQueryTestCase.java b/server/src/test/java/org/elasticsearch/index/query/FullTextQueryTestCase.java new file mode 100644 index 00000000000..7f9d85e3e26 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/query/FullTextQueryTestCase.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; + +public abstract class FullTextQueryTestCase<QB extends AbstractQueryBuilder<QB>> extends AbstractQueryTestCase<QB> { + protected abstract boolean isCacheable(QB queryBuilder); + + /** + * Full text queries that start with "now" are not cacheable if they + * target a {@link DateFieldMapper.DateFieldType} field. + */ + protected final boolean isCacheable(Collection<String> fields, String value) { + if (value.length() < 3 + || value.substring(0, 3).equalsIgnoreCase("now") == false) { + return true; + } + Set<String> dateFields = new HashSet<>(); + for (MappedFieldType ft : getMapping()) { + if (ft instanceof DateFieldMapper.DateFieldType) { + dateFields.add(ft.name()); + } + } + if (fields.isEmpty()) { + // special case: all fields are requested + return dateFields.isEmpty(); + } + return fields.stream() + .anyMatch(dateFields::contains) == false; + } +} diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index e9f2b447da1..a7aad3dbc3e 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -46,7 +46,6 @@ import org.elasticsearch.index.search.MatchQuery; import org.elasticsearch.index.search.MatchQuery.Type; import org.elasticsearch.index.search.MatchQuery.ZeroTermsQuery; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.Matcher; import java.io.IOException; @@ -56,13 +55,19 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import static java.util.Collections.singletonList; import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; -public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuilder> { +public class MatchQueryBuilderTests extends FullTextQueryTestCase<MatchQueryBuilder> { + @Override + protected boolean isCacheable(MatchQueryBuilder queryBuilder) { + return queryBuilder.fuzziness() != null + || isCacheable(singletonList(queryBuilder.fieldName()), queryBuilder.value().toString()); + } @Override protected MatchQueryBuilder doCreateTestQueryBuilder() { diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java
b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 7ca722fc311..ab9b3c73213 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.query.MultiMatchQueryBuilder.Type; import org.elasticsearch.index.search.MatchQuery; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; import java.util.Arrays; @@ -59,11 +58,16 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.collection.IsCollectionWithSize.hasSize; -public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatchQueryBuilder> { - +public class MultiMatchQueryBuilderTests extends FullTextQueryTestCase<MultiMatchQueryBuilder> { private static final String MISSING_WILDCARD_FIELD_NAME = "missing_*"; private static final String MISSING_FIELD_NAME = "missing"; + @Override + protected boolean isCacheable(MultiMatchQueryBuilder queryBuilder) { + return queryBuilder.fuzziness() != null + || isCacheable(queryBuilder.fields().keySet(), queryBuilder.value().toString()); + } + @Override protected MultiMatchQueryBuilder doCreateTestQueryBuilder() { String fieldName = randomFrom(STRING_FIELD_NAME, INT_FIELD_NAME, DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 9ca77ae3e1b..a49f0a5755c 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -62,7 +62,6 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.search.QueryStringQueryParser; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.CoreMatchers; import org.hamcrest.Matchers; @@ -84,7 +83,12 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; -public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStringQueryBuilder> { +public class QueryStringQueryBuilderTests extends FullTextQueryTestCase<QueryStringQueryBuilder> { + @Override + protected boolean isCacheable(QueryStringQueryBuilder queryBuilder) { + return queryBuilder.fuzziness() != null + || isCacheable(queryBuilder.fields().keySet(), queryBuilder.queryString()); + } @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 2bb289ddc11..daed696f02f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -44,7 +44,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.search.SimpleQueryStringQueryParser; import
org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; import java.util.ArrayList; @@ -65,7 +64,11 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQueryStringBuilder> { +public class SimpleQueryStringBuilderTests extends FullTextQueryTestCase<SimpleQueryStringBuilder> { + @Override + protected boolean isCacheable(SimpleQueryStringBuilder queryBuilder) { + return isCacheable(queryBuilder.fields().keySet(), queryBuilder.value()); + } @Override protected SimpleQueryStringBuilder doCreateTestQueryBuilder() { @@ -107,11 +110,6 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQueryStringBuilder> - if (queryText.length() >= 3 && queryText.substring(0,3).equalsIgnoreCase("now")) { - fields.put(STRING_FIELD_NAME_2, 2.0f / randomIntBetween(1, 20)); - } result.fields(fields); if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java index c99886c3f66..94f26d8ed8f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java @@ -72,6 +72,7 @@ import java.util.concurrent.ExecutionException; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.IsNot.not; public class FunctionScoreTests extends ESTestCase { @@ -824,7 +825,6 @@ public class FunctionScoreTests extends ESTestCase { assertThat(exc.getMessage(), containsString("function score query returned an invalid score: " + Float.NEGATIVE_INFINITY)); } - public void testExceptionOnNegativeScores() { IndexSearcher localSearcher = new IndexSearcher(reader); TermQuery termQuery = new TermQuery(new Term(FIELD, "out")); @@ -836,6 +836,34 @@ new FunctionScoreQuery(termQuery, fvfFunction, CombineFunction.REPLACE, null, Float.POSITIVE_INFINITY); IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> localSearcher.search(fsQuery1, 1)); assertThat(exc.getMessage(), containsString("field value function must not produce negative scores")); + assertThat(exc.getMessage(), not(containsString("consider using ln1p or ln2p instead of ln to avoid negative scores"))); + assertThat(exc.getMessage(), not(containsString("consider using log1p or log2p instead of log to avoid negative scores"))); + } + + public void testExceptionOnLnNegativeScores() { + IndexSearcher localSearcher = new IndexSearcher(reader); + TermQuery termQuery = new TermQuery(new Term(FIELD, "out")); + + // test that field_value_factor function using modifier ln throws an exception on negative scores + FieldValueFactorFunction.Modifier modifier = FieldValueFactorFunction.Modifier.LN; + final ScoreFunction fvfFunction = new FieldValueFactorFunction(FIELD, 0.5f, modifier, 1.0, new IndexNumericFieldDataStub()); + FunctionScoreQuery fsQuery1 = + new FunctionScoreQuery(termQuery, fvfFunction, CombineFunction.REPLACE, null, Float.POSITIVE_INFINITY); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> localSearcher.search(fsQuery1, 1)); + assertThat(exc.getMessage(), containsString("consider using ln1p or ln2p instead of
ln to avoid negative scores")); + } + + public void testExceptionOnLogNegativeScores() { + IndexSearcher localSearcher = new IndexSearcher(reader); + TermQuery termQuery = new TermQuery(new Term(FIELD, "out")); + + // test that field_value_factor function using modifier log throws an exception on negative scores + FieldValueFactorFunction.Modifier modifier = FieldValueFactorFunction.Modifier.LOG; + final ScoreFunction fvfFunction = new FieldValueFactorFunction(FIELD, 0.5f, modifier, 1.0, new IndexNumericFieldDataStub()); + FunctionScoreQuery fsQuery1 = + new FunctionScoreQuery(termQuery, fvfFunction, CombineFunction.REPLACE, null, Float.POSITIVE_INFINITY); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> localSearcher.search(fsQuery1, 1)); + assertThat(exc.getMessage(), containsString("consider using log1p or log2p instead of log to avoid negative scores")); } private static class DummyScoreFunction extends ScoreFunction { diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index de6ab82892f..d499cf6e83f 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -40,7 +40,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; +import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineFactory; @@ -770,7 +770,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC } } shards.refresh("test"); - List<DocIdSeqNoAndTerm> docsBelowGlobalCheckpoint = EngineTestCase.getDocIds(getEngine(newPrimary), randomBoolean()) + List<DocIdSeqNoAndSource> docsBelowGlobalCheckpoint = EngineTestCase.getDocIds(getEngine(newPrimary), randomBoolean()) .stream().filter(doc -> doc.getSeqNo() <= newPrimary.getGlobalCheckpoint()).collect(Collectors.toList()); CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean done = new AtomicBoolean(); @@ -780,7 +780,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC latch.countDown(); while (done.get() == false) { try { - List<DocIdSeqNoAndTerm> exposedDocs = EngineTestCase.getDocIds(getEngine(randomFrom(replicas)), randomBoolean()); + List<DocIdSeqNoAndSource> exposedDocs = EngineTestCase.getDocIds(getEngine(randomFrom(replicas)), randomBoolean()); assertThat(docsBelowGlobalCheckpoint, everyItem(isIn(exposedDocs))); assertThat(randomFrom(replicas).getLocalCheckpoint(), greaterThanOrEqualTo(initDocs - 1L)); } catch (AlreadyClosedException ignored) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index a434c62b7c2..45e44ebbc47 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -79,7 +79,7 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.CommitStats; -import
org.elasticsearch.index.engine.DocIdSeqNoAndTerm; +import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine.DeleteResult; import org.elasticsearch.index.engine.EngineException; @@ -3664,7 +3664,7 @@ public class IndexShardTests extends IndexShardTestCase { while (done.get() == false) { try { List exposedDocIds = EngineTestCase.getDocIds(getEngine(shard), rarely()) - .stream().map(DocIdSeqNoAndTerm::getId).collect(Collectors.toList()); + .stream().map(DocIdSeqNoAndSource::getId).collect(Collectors.toList()); assertThat("every operations before the global checkpoint must be reserved", docBelowGlobalCheckpoint, everyItem(isIn(exposedDocIds))); } catch (AlreadyClosedException ignored) { diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index 8ee7d430b0a..e3808175f76 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -22,25 +22,31 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; import java.util.stream.IntStream; import static java.util.Collections.emptySet; @@ -50,9 +56,11 @@ import static org.elasticsearch.search.internal.SearchContext.TRACK_TOTAL_HITS_A import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; public class CloseIndexIT extends ESIntegTestCase { @@ -341,6 +349,81 @@ public class CloseIndexIT extends ESIntegTestCase { assertIndexIsClosed(indexName); } + public void testNoopPeerRecoveriesWhenIndexClosed() throws Exception { + final String indexName = "noop-peer-recovery-test"; + int numberOfReplicas = between(1, 2); 
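An aside before the rest of this new test: "noop" here means the replica recovery completes without copying any segment files. A minimal sketch of how that is observable through the recoveries API, assuming the same ESIntegTestCase context and the usual org.elasticsearch.action.admin.indices.recovery imports (it mirrors the assertNoFileBasedRecovery helper this change adds further down in this file):

    // Sketch: a peer recovery is a noop when no segment files were transferred to the replica.
    RecoveryResponse response = client().admin().indices().prepareRecoveries("noop-peer-recovery-test").get();
    for (RecoveryState recovery : response.shardRecoveryStates().get("noop-peer-recovery-test")) {
        if (recovery.getPrimary() == false) {
            assertThat(recovery.getIndex().fileDetails(), empty()); // nothing copied, recovery was a noop
        }
    }

The test itself continues below.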
+ internalCluster().ensureAtLeastNumDataNodes(numberOfReplicas + between(1, 2)); + createIndex(indexName, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) + .put("index.routing.rebalance.enable", "none") + .build()); + int iterations = between(1, 3); + for (int iter = 0; iter < iterations; iter++) { + indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, randomIntBetween(0, 50)) + .mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("num", n)).collect(toList())); + ensureGreen(indexName); + + // Closing an index should execute noop peer recovery + assertAcked(client().admin().indices().prepareClose(indexName).get()); + assertIndexIsClosed(indexName); + ensureGreen(indexName); + assertNoFileBasedRecovery(indexName); + internalCluster().assertSameDocIdsOnShards(); + + // Open a closed index should execute noop recovery + assertAcked(client().admin().indices().prepareOpen(indexName).get()); + assertIndexIsOpened(indexName); + ensureGreen(indexName); + assertNoFileBasedRecovery(indexName); + internalCluster().assertSameDocIdsOnShards(); + } + } + + /** + * Ensures that if a replica of a closed index does not have the same content as the primary, then a file-based recovery will occur. + */ + public void testRecoverExistingReplica() throws Exception { + final String indexName = "test-recover-existing-replica"; + internalCluster().ensureAtLeastNumDataNodes(2); + List dataNodes = randomSubsetOf(2, Sets.newHashSet( + clusterService().state().nodes().getDataNodes().valuesIt()).stream().map(DiscoveryNode::getName).collect(Collectors.toSet())); + createIndex(indexName, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.routing.allocation.include._name", String.join(",", dataNodes)) + .build()); + indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, randomIntBetween(0, 50)) + .mapToObj(n -> client().prepareIndex(indexName, "_doc").setSource("num", n)).collect(toList())); + ensureGreen(indexName); + if (randomBoolean()) { + client().admin().indices().prepareFlush(indexName).get(); + } else { + client().admin().indices().prepareSyncedFlush(indexName).get(); + } + // index more documents while one shard copy is offline + internalCluster().restartNode(dataNodes.get(1), new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + Client client = client(dataNodes.get(0)); + int moreDocs = randomIntBetween(1, 50); + for (int i = 0; i < moreDocs; i++) { + client.prepareIndex(indexName, "_doc").setSource("num", i).get(); + } + assertAcked(client.admin().indices().prepareClose(indexName)); + return super.onNodeStopped(nodeName); + } + }); + assertIndexIsClosed(indexName); + ensureGreen(indexName); + internalCluster().assertSameDocIdsOnShards(); + for (RecoveryState recovery : client().admin().indices().prepareRecoveries(indexName).get().shardRecoveryStates().get(indexName)) { + if (recovery.getPrimary() == false) { + assertThat(recovery.getIndex().fileDetails(), not(empty())); + } + } + } + static void assertIndexIsClosed(final String... 
indices) { final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); for (String index : indices) { @@ -386,4 +469,12 @@ public class CloseIndexIT extends ESIntegTestCase { fail("Unexpected exception: " + t); } } + + void assertNoFileBasedRecovery(String indexName) { + for (RecoveryState recovery : client().admin().indices().prepareRecoveries(indexName).get().shardRecoveryStates().get(indexName)) { + if (recovery.getPrimary() == false) { + assertThat(recovery.getIndex().fileDetails(), empty()); + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index c0839284364..6f0419421b8 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -174,7 +174,6 @@ public class NodeTests extends ESTestCase { shouldRun.set(false); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41448") public void testCloseOnInterruptibleTask() throws Exception { Node node = new MockNode(baseSettings().build(), basePlugins()); node.start(); diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 16b18efe623..69d5f095948 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -345,11 +345,23 @@ public class QueryPhaseTests extends IndexShardTestCase { TestSearchContext context = new TestSearchContext(null, indexShard); context.setTask(new SearchTask(123L, "", "", "", null, Collections.emptyMap())); context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); - context.terminateAfter(1); final IndexReader reader = DirectoryReader.open(dir); IndexSearcher contextSearcher = new IndexSearcher(reader); + context.terminateAfter(numDocs); + { + context.setSize(10); + TotalHitCountCollector collector = new TotalHitCountCollector(); + context.queryCollectors().put(TotalHitCountCollector.class, collector); + QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); + assertFalse(context.queryResult().terminatedEarly()); + assertThat(context.queryResult().topDocs().topDocs.totalHits.value, equalTo((long) numDocs)); + assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(10)); + assertThat(collector.getTotalHits(), equalTo(numDocs)); + } + + context.terminateAfter(1); { context.setSize(1); QueryPhase.execute(context, contextSearcher, checkCancelled -> {}); @@ -411,7 +423,6 @@ public class QueryPhaseTests extends IndexShardTestCase { assertThat(context.queryResult().topDocs().topDocs.scoreDocs.length, equalTo(0)); assertThat(collector.getTotalHits(), equalTo(1)); } - reader.close(); dir.close(); } diff --git a/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java index 3e546d17202..74b0408636f 100644 --- a/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -250,7 +250,7 @@ public class SimpleSearchIT extends ESIntegTestCase { .setTerminateAfter(2 * max).get(); assertHitCount(searchResponse, max); - assertNull(searchResponse.isTerminatedEarly()); + assertFalse(searchResponse.isTerminatedEarly()); } public void testSimpleIndexSortEarlyTerminate() throws 
Exception { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 5788655bb3e..ee6b56e81fe 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -2424,7 +2424,6 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39828") public void testCloseOrDeleteIndexDuringSnapshot() throws Exception { Client client = client(); diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 4519513db28..80d183e499e 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -19,14 +19,25 @@ package org.elasticsearch.transport; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.hamcrest.Matcher; import java.io.IOException; import java.io.StreamCorruptedException; +import java.net.InetSocketAddress; +import java.util.Collections; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.core.IsInstanceOf.instanceOf; /** Unit tests for {@link TcpTransport} */ @@ -34,50 +45,26 @@ public class TcpTransportTests extends ESTestCase { /** Test ipv4 host with a default port works */ public void testParseV4DefaultPort() throws Exception { - TransportAddress[] addresses = TcpTransport.parse("127.0.0.1", "1234", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("127.0.0.1", 1234); assertEquals(1, addresses.length); assertEquals("127.0.0.1", addresses[0].getAddress()); assertEquals(1234, addresses[0].getPort()); } - /** Test ipv4 host with a default port range works */ - public void testParseV4DefaultRange() throws Exception { - TransportAddress[] addresses = TcpTransport.parse("127.0.0.1", "1234-1235", Integer.MAX_VALUE); - assertEquals(2, addresses.length); - - assertEquals("127.0.0.1", addresses[0].getAddress()); - assertEquals(1234, addresses[0].getPort()); - - assertEquals("127.0.0.1", addresses[1].getAddress()); - assertEquals(1235, addresses[1].getPort()); - } - /** Test ipv4 host with port works */ public void testParseV4WithPort() throws Exception { - TransportAddress[] addresses = TcpTransport.parse("127.0.0.1:2345", "1234", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("127.0.0.1:2345", 1234); assertEquals(1, addresses.length); assertEquals("127.0.0.1", addresses[0].getAddress()); assertEquals(2345, addresses[0].getPort()); } - /** Test ipv4 host with port range works */ - public void testParseV4WithPortRange() throws Exception { - TransportAddress[] addresses = TcpTransport.parse("127.0.0.1:2345-2346", 
"1234", Integer.MAX_VALUE); - assertEquals(2, addresses.length); - - assertEquals("127.0.0.1", addresses[0].getAddress()); - assertEquals(2345, addresses[0].getPort()); - - assertEquals("127.0.0.1", addresses[1].getAddress()); - assertEquals(2346, addresses[1].getPort()); - } - /** Test unbracketed ipv6 hosts in configuration fail. Leave no ambiguity */ public void testParseV6UnBracketed() throws Exception { try { - TcpTransport.parse("::1", "1234", Integer.MAX_VALUE); + TcpTransport.parse("::1", 1234); fail("should have gotten exception"); } catch (IllegalArgumentException expected) { assertTrue(expected.getMessage().contains("must be bracketed")); @@ -86,53 +73,107 @@ public class TcpTransportTests extends ESTestCase { /** Test ipv6 host with a default port works */ public void testParseV6DefaultPort() throws Exception { - TransportAddress[] addresses = TcpTransport.parse("[::1]", "1234", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("[::1]", 1234); assertEquals(1, addresses.length); assertEquals("::1", addresses[0].getAddress()); assertEquals(1234, addresses[0].getPort()); } - /** Test ipv6 host with a default port range works */ - public void testParseV6DefaultRange() throws Exception { - TransportAddress[] addresses = TcpTransport.parse("[::1]", "1234-1235", Integer.MAX_VALUE); - assertEquals(2, addresses.length); - - assertEquals("::1", addresses[0].getAddress()); - assertEquals(1234, addresses[0].getPort()); - - assertEquals("::1", addresses[1].getAddress()); - assertEquals(1235, addresses[1].getPort()); - } - /** Test ipv6 host with port works */ public void testParseV6WithPort() throws Exception { - TransportAddress[] addresses = TcpTransport.parse("[::1]:2345", "1234", Integer.MAX_VALUE); + TransportAddress[] addresses = TcpTransport.parse("[::1]:2345", 1234); assertEquals(1, addresses.length); assertEquals("::1", addresses[0].getAddress()); assertEquals(2345, addresses[0].getPort()); } - /** Test ipv6 host with port range works */ - public void testParseV6WithPortRange() throws Exception { - TransportAddress[] addresses = TcpTransport.parse("[::1]:2345-2346", "1234", Integer.MAX_VALUE); - assertEquals(2, addresses.length); - - assertEquals("::1", addresses[0].getAddress()); - assertEquals(2345, addresses[0].getPort()); - - assertEquals("::1", addresses[1].getAddress()); - assertEquals(2346, addresses[1].getPort()); + public void testRejectsPortRanges() { + expectThrows( + NumberFormatException.class, + () -> TcpTransport.parse("[::1]:100-200", 1000) + ); } - /** Test per-address limit */ - public void testAddressLimit() throws Exception { - TransportAddress[] addresses = TcpTransport.parse("[::1]:100-200", "1000", 3); - assertEquals(3, addresses.length); - assertEquals(100, addresses[0].getPort()); - assertEquals(101, addresses[1].getPort()); - assertEquals(102, addresses[2].getPort()); + public void testDefaultSeedAddressesWithDefaultPort() { + testDefaultSeedAddresses(Settings.EMPTY, containsInAnyOrder( + "[::1]:9300", "[::1]:9301", "[::1]:9302", "[::1]:9303", "[::1]:9304", "[::1]:9305", + "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304", "127.0.0.1:9305")); + } + + public void testDefaultSeedAddressesWithNonstandardGlobalPortRange() { + testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9500-9600").build(), containsInAnyOrder( + "[::1]:9500", "[::1]:9501", "[::1]:9502", "[::1]:9503", "[::1]:9504", "[::1]:9505", + "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", 
"127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505")); + } + + public void testDefaultSeedAddressesWithSmallGlobalPortRange() { + testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9300-9302").build(), containsInAnyOrder( + "[::1]:9300", "[::1]:9301", "[::1]:9302", + "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302")); + } + + public void testDefaultSeedAddressesWithNonstandardProfilePortRange() { + testDefaultSeedAddresses(Settings.builder() + .put(TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(TransportSettings.DEFAULT_PROFILE).getKey(), "9500-9600") + .build(), + containsInAnyOrder( + "[::1]:9500", "[::1]:9501", "[::1]:9502", "[::1]:9503", "[::1]:9504", "[::1]:9505", + "127.0.0.1:9500", "127.0.0.1:9501", "127.0.0.1:9502", "127.0.0.1:9503", "127.0.0.1:9504", "127.0.0.1:9505")); + } + + public void testDefaultSeedAddressesWithSmallProfilePortRange() { + testDefaultSeedAddresses(Settings.builder() + .put(TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(TransportSettings.DEFAULT_PROFILE).getKey(), "9300-9302") + .build(), + containsInAnyOrder( + "[::1]:9300", "[::1]:9301", "[::1]:9302", + "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302")); + } + + public void testDefaultSeedAddressesPrefersProfileSettingToGlobalSetting() { + testDefaultSeedAddresses(Settings.builder() + .put(TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(TransportSettings.DEFAULT_PROFILE).getKey(), "9300-9302") + .put(TransportSettings.PORT.getKey(), "9500-9600") + .build(), + containsInAnyOrder( + "[::1]:9300", "[::1]:9301", "[::1]:9302", + "127.0.0.1:9300", "127.0.0.1:9301", "127.0.0.1:9302")); + } + + public void testDefaultSeedAddressesWithNonstandardSinglePort() { + testDefaultSeedAddresses(Settings.builder().put(TransportSettings.PORT.getKey(), "9500").build(), + containsInAnyOrder("[::1]:9500", "127.0.0.1:9500")); + } + + private void testDefaultSeedAddresses(final Settings settings, Matcher> seedAddressesMatcher) { + final TestThreadPool testThreadPool = new TestThreadPool("test"); + try { + final TcpTransport tcpTransport = new TcpTransport(settings, Version.CURRENT, testThreadPool, + new MockPageCacheRecycler(settings), + new NoneCircuitBreakerService(), writableRegistry(), new NetworkService(Collections.emptyList())) { + + @Override + protected TcpServerChannel bind(String name, InetSocketAddress address) { + throw new UnsupportedOperationException(); + } + + @Override + protected TcpChannel initiateChannel(DiscoveryNode node) { + throw new UnsupportedOperationException(); + } + + @Override + protected void stopInternal() { + throw new UnsupportedOperationException(); + } + }; + + assertThat(tcpTransport.getDefaultSeedAddresses(), seedAddressesMatcher); + } finally { + testThreadPool.shutdown(); + } } public void testDecodeWithIncompleteHeader() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java index ac58b0e25b9..dd46059aa2a 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportLoggerTests.java @@ -66,7 +66,7 @@ public class TransportLoggerTests extends ESTestCase { ", action: cluster:monitor/stats]" + " WRITE: \\d+B"; final MockLogAppender.LoggingExpectation writeExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "hot threads request", 
TransportLogger.class.getCanonicalName(), Level.TRACE, writePattern); final String readPattern = @@ -78,7 +78,7 @@ public class TransportLoggerTests extends ESTestCase { " READ: \\d+B"; final MockLogAppender.LoggingExpectation readExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "cluster monitor request", TransportLogger.class.getCanonicalName(), Level.TRACE, readPattern); appender.addExpectation(writeExpectation); diff --git a/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java new file mode 100644 index 00000000000..441e1bf4186 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -0,0 +1,791 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.versioning; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.cluster.coordination.LinearizabilityChecker; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.AbstractDisruptionTestCase; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.ServiceDisruptionScheme; +import org.elasticsearch.test.junit.annotations.TestLogging; + +import java.io.FileInputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Random; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.regex.Matcher; +import 
java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + + +/** + * This test stress tests CAS updates using sequence number based versioning (ifPrimaryTerm/ifSeqNo). + * + *
+ * <p>The following is a summary of the expected CAS write behaviour of the system:</p> + * + * <ul> + * <li>acknowledged CAS writes are guaranteed to have taken place between invocation and response and cannot be lost. It is + * guaranteed that the previous value had the specified primaryTerm and seqNo</li> + * <li>CAS writes resulting in a VersionConflictEngineException might or might not have taken place or may take place in the future + * provided the primaryTerm and seqNo still match. The reason we cannot assume it will not take place after receiving the failure + * is that a request can fork into two because of retries on disconnect, and now race against itself. The retry might complete (and do a + * dirty or stale read) before the forked-off request gets to execute, and that one might still subsequently succeed. + * + * <p>Such writes are not necessarily fully replicated and can be lost. There is no + * guarantee that the previous value did not have the specified primaryTerm and seqNo</p></li> + * <li>CAS writes with other exceptions might or might not have taken place. If they have taken place, then after invocation but not + * necessarily before response. Such writes are not necessarily fully replicated and can be lost.</li> + * </ul> + * + * <p>A deeper technical explanation of the behaviour is given here:</p> + * + * <ul> + * <li>A CAS can fail on its own write in at least two ways. In both cases, the write might have taken place even though we get a + * version conflict response. Even though we might observe the write (by reading (not done in this test) or another CAS write), the + * write could be lost since it is not fully replicated. Details: + * <ul> + * <li>A write is successfully stored on primary and one replica (r1). Replication to second replica fails, primary is demoted + * and r1 is promoted to primary. The request is repeated on r1, but this time the request fails due to its own write.</li> + * <li>A coordinator sends write to primary, which stores write successfully (and replicates it). Connection is lost before + * response is sent back. Once connection is back, coordinator will retry against either same or new primary, but this time the + * request will fail due to its own write.</li> + * </ul></li> + * <li>A CAS can fail on stale reads. A CAS failure is only checked on the supposedly primary node. However, the primary might not be + * the newest primary (could be isolated or just not have been told yet). So a CAS check is susceptible to stale reads (like any + * read) and can thus fail due to reading stale data. Notice that a CAS success is fully replicated and thus guaranteed to not + * suffer from stale (or dirty) reads.</li> + * <li>A CAS can fail on a dirty read, i.e., a non-replicated write that ends up being discarded.</li> + * <li>For any other failure, we do not know if the write will succeed after the failure. However, we do know that if we + * subsequently get back a CAS success with seqNo s, any previous failures with ifSeqNo &lt; s will not be able to succeed (but could + * produce dirty writes on a stale primary).</li> + * <li>A CAS failure or any other failure can eventually succeed after receiving the failure response due to reroute and retries, + * see above.</li> + * <li>A CAS failure throws a VersionConflictEngineException which does not directly contain the current seqno/primary-term to use for + * the next request. It is contained in the message (and we parse it out in the test), but notice that the numbers given here could be + * stale or dirty, i.e., come from a stale primary or belong to a write that ends up being discarded.</li> + * </ul>
+ */ +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 4, maxNumDataNodes = 6, + transportClientRatio = 0) +@TestLogging("_root:DEBUG,org.elasticsearch.action.bulk:TRACE,org.elasticsearch.action.get:TRACE," + + "org.elasticsearch.discovery:TRACE,org.elasticsearch.action.support.replication:TRACE," + + "org.elasticsearch.cluster.service:TRACE,org.elasticsearch.indices.recovery:TRACE," + + "org.elasticsearch.indices.cluster:TRACE,org.elasticsearch.index.shard:TRACE") +public class ConcurrentSeqNoVersioningIT extends AbstractDisruptionTestCase { + + private static final Pattern EXTRACT_VERSION = Pattern.compile("current document has seqNo \\[(\\d+)\\] and primary term \\[(\\d+)\\]"); + + // Test info: disrupt network for up to 8s in a number of rounds and check that we only get true positive CAS results when running + // multiple threads doing CAS updates. + // Wait up to 1 minute (+10s in thread to ensure it does not time out) for threads to complete previous round before initiating next + // round. + public void testSeqNoCASLinearizability() { + final int disruptTimeSeconds = scaledRandomIntBetween(1, 8); + + assertAcked(prepareCreate("test") + .setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1 + randomInt(2)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(3)) + )); + + ensureGreen(); + + int numberOfKeys = randomIntBetween(1, 10); + + logger.info("--> Indexing initial doc for {} keys", numberOfKeys); + List partitions = + IntStream.range(0, numberOfKeys) + .mapToObj(i -> client().prepareIndex("test", "type", "ID:" + i).setSource("value", -1).get()) + .map(response -> + new Partition(response.getId(), new Version(response.getPrimaryTerm(), response.getSeqNo()))) + .collect(Collectors.toList()); + + int threadCount = randomIntBetween(3, 20); + CyclicBarrier roundBarrier = new CyclicBarrier(threadCount + 1); // +1 for main thread. + + List threads = + IntStream.range(0, threadCount) + .mapToObj(i -> new CASUpdateThread(i, roundBarrier, partitions, disruptTimeSeconds + 1)) + .collect(Collectors.toList()); + + logger.info("--> Starting {} threads", threadCount); + threads.forEach(Thread::start); + + try { + int rounds = randomIntBetween(2, 5); + + logger.info("--> Running {} rounds", rounds); + + for (int i = 0; i < rounds; ++i) { + ServiceDisruptionScheme disruptionScheme = addRandomDisruptionScheme(); + roundBarrier.await(1, TimeUnit.MINUTES); + disruptionScheme.startDisrupting(); + logger.info("--> round {}", i); + try { + roundBarrier.await(disruptTimeSeconds, TimeUnit.SECONDS); + } catch (TimeoutException e) { + roundBarrier.reset(); + } + internalCluster().clearDisruptionScheme(false); + // heal cluster faster to reduce test time. 
+ ensureFullyConnectedCluster(); + } + } catch (InterruptedException | BrokenBarrierException | TimeoutException e) { + logger.error("Timed out, dumping stack traces of all threads:"); + threads.forEach( + thread -> logger.info(thread.toString() + ":\n" + ExceptionsHelper.formatStackTrace(thread.getStackTrace()))); + throw new RuntimeException(e); + } finally { + logger.info("--> terminating test"); + threads.forEach(CASUpdateThread::terminate); + threads.forEach(CASUpdateThread::await); + threads.stream().filter(Thread::isAlive).forEach(t -> fail("Thread still alive: " + t)); + } + + partitions.forEach(Partition::assertLinearizable); + } + + + private class CASUpdateThread extends Thread { + private final CyclicBarrier roundBarrier; + private final List partitions; + private final int timeout; + + private volatile boolean stop; + private final Random random = new Random(randomLong()); + + private CASUpdateThread(int threadNum, CyclicBarrier roundBarrier, List partitions, int timeout) { + super("CAS-Update-" + threadNum); + this.roundBarrier = roundBarrier; + this.partitions = partitions; + this.timeout = timeout; + setDaemon(true); + } + + public void run() { + while (stop == false) { + try { + roundBarrier.await(70, TimeUnit.SECONDS); + + int numberOfUpdates = randomIntBetween(3, 13) * partitions.size(); + for (int i = 0; i < numberOfUpdates; ++i) { + final int keyIndex = random.nextInt(partitions.size()); + final Partition partition = partitions.get(keyIndex); + + final int seqNoChangePct = random.nextInt(100); + + // we use either the latest observed or the latest successful version, to increase chance of getting successful + // CAS'es and races. If we were to use only the latest successful version, any CAS fail on own write would mean that + // all future CAS'es would fail unless we guess the seqno/term below. On the other hand, using latest observed + // version exclusively we risk a single CAS fail on a dirty read to cause the same. Doing both randomly and adding + // variance to seqno/term should ensure we progress fine in most runs. + Version version = random.nextBoolean() ? partition.latestObservedVersion() : partition.latestSuccessfulVersion(); + + if (seqNoChangePct < 10) { + version = version.nextSeqNo(random.nextInt(4) + 1); + } else if (seqNoChangePct < 15) { + version = version.previousSeqNo(random.nextInt(4) + 1); + } + + final int termChangePct = random.nextInt(100); + if (termChangePct < 5) { + version = version.nextTerm(); + } else if (termChangePct < 10) { + version = version.previousTerm(); + } + + IndexRequest indexRequest = new IndexRequest("test", "type", partition.id) + .source("value", random.nextInt()) + .setIfPrimaryTerm(version.primaryTerm) + .setIfSeqNo(version.seqNo); + Consumer historyResponse = partition.invoke(version); + try { + // we should be able to remove timeout or fail hard on timeouts + IndexResponse indexResponse = client().index(indexRequest).actionGet(timeout, TimeUnit.SECONDS); + IndexResponseHistoryOutput historyOutput = new IndexResponseHistoryOutput(indexResponse); + historyResponse.accept(historyOutput); + // validate version and seqNo strictly increasing for successful CAS to avoid that overhead during + // linearizability checking. 
+ assertThat(historyOutput.outputVersion, greaterThan(version)); + assertThat(historyOutput.outputVersion.seqNo, greaterThan(version.seqNo)); + } catch (VersionConflictEngineException e) { + // if we supplied an input version <= latest successful version, we can safely assume that any failed + // operation will no longer be able to complete after the next successful write and we can therefore terminate + // the operation wrt. linearizability. + // todo: collect the failed responses and terminate when CAS with higher output version is successful, since + // this is the guarantee we offer. + if (version.compareTo(partition.latestSuccessfulVersion()) <= 0) { + historyResponse.accept(new CASFailureHistoryOutput(e)); + } + } catch (RuntimeException e) { + // if we supplied an input version <= to latest successful version, we can safely assume that any failed + // operation will no longer be able to complete after the next successful write and we can therefore terminate + // the operation wrt. linearizability. + // todo: collect the failed responses and terminate when CAS with higher output version is successful, since + // this is the guarantee we offer. + if (version.compareTo(partition.latestSuccessfulVersion()) <= 0) { + historyResponse.accept(new FailureHistoryOutput()); + } + logger.info( + new ParameterizedMessage("Received failure for request [{}], version [{}]", indexRequest, version), + e); + if (stop) { + // interrupt often comes as a RuntimeException so check to stop here too. + return; + } + } + } + } catch (InterruptedException e) { + assert stop : "should only be interrupted when stopped"; + } catch (BrokenBarrierException e) { + // a thread can go here either because it completed before disruption ended, timeout on main thread causes broken + // barrier + } catch (TimeoutException e) { + // this is timeout on the barrier, unexpected. + throw new AssertionError("Unexpected timeout in thread: " + Thread.currentThread(), e); + } + } + } + + public void terminate() { + stop = true; + this.interrupt(); + } + + public void await() { + try { + join(60000); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + } + + /** + * Our version, which is primaryTerm,seqNo. 
+     */
+    private static final class Version implements NamedWriteable, Comparable<Version> {
+        public final long primaryTerm;
+        public final long seqNo;
+
+        Version(long primaryTerm, long seqNo) {
+            this.primaryTerm = primaryTerm;
+            this.seqNo = seqNo;
+        }
+
+        Version(StreamInput input) throws IOException {
+            this.primaryTerm = input.readLong();
+            this.seqNo = input.readLong();
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Version version = (Version) o;
+            return primaryTerm == version.primaryTerm &&
+                seqNo == version.seqNo;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(primaryTerm, seqNo);
+        }
+
+        @Override
+        public int compareTo(Version other) {
+            int termCompare = Long.compare(primaryTerm, other.primaryTerm);
+            if (termCompare != 0)
+                return termCompare;
+            return Long.compare(seqNo, other.seqNo);
+        }
+
+        @Override
+        public String toString() {
+            return "{" + "primaryTerm=" + primaryTerm + ", seqNo=" + seqNo + '}';
+        }
+
+        public Version nextSeqNo(int increment) {
+            return new Version(primaryTerm, seqNo + increment);
+        }
+
+        public Version previousSeqNo(int decrement) {
+            return new Version(primaryTerm, Math.max(seqNo - decrement, 0));
+        }
+
+        @Override
+        public String getWriteableName() {
+            return "version";
+        }
+
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeLong(primaryTerm);
+            out.writeLong(seqNo);
+        }
+
+        public Version previousTerm() {
+            return new Version(primaryTerm - 1, seqNo);
+        }
+
+        public Version nextTerm() {
+            return new Version(primaryTerm + 1, seqNo);
+        }
+    }
+
+    private static class AtomicVersion {
+        private final AtomicReference<Version> current;
+
+        private AtomicVersion(Version initialVersion) {
+            this.current = new AtomicReference<>(initialVersion);
+        }
+
+        public Version get() {
+            return current.get();
+        }
+
+        public void consume(Version version) {
+            if (version == null)
+                return;
+            this.current.updateAndGet(current -> version.compareTo(current) <= 0 ? current : version);
+        }
+    }
+
+    private class Partition {
+        private final String id;
+        private final AtomicVersion latestSuccessfulVersion;
+        private final AtomicVersion latestObservedVersion;
+        private final Version initialVersion;
+        private final LinearizabilityChecker.History history = new LinearizabilityChecker.History();
+
+        private Partition(String id, Version initialVersion) {
+            this.id = id;
+            this.latestSuccessfulVersion = new AtomicVersion(initialVersion);
+            this.latestObservedVersion = new AtomicVersion(initialVersion);
+            this.initialVersion = initialVersion;
+        }
+
+        // latest version that was observed, possibly dirty read of a write that does not survive
+        public Version latestObservedVersion() {
+            return latestObservedVersion.get();
+        }
+
+        // latest version for which we got a successful response on a write.
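+        // note: this can trail latestObservedVersion, since successful responses also feed the observed version.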
+        public Version latestSuccessfulVersion() {
+            return latestSuccessfulVersion.get();
+        }
+
+        public Consumer<HistoryOutput> invoke(Version version) {
+            int eventId = history.invoke(version);
+            logger.debug("invocation partition ({}) event ({}) version ({})", id, eventId, version);
+            return output -> consumeOutput(output, eventId);
+        }
+
+        private void consumeOutput(HistoryOutput output, int eventId) {
+            history.respond(eventId, output);
+            logger.debug("response partition ({}) event ({}) output ({})", id, eventId, output);
+            latestObservedVersion.consume(output.getVersion());
+            if (output instanceof IndexResponseHistoryOutput) {
+                latestSuccessfulVersion.consume(output.getVersion());
+            }
+        }
+
+        public boolean isLinearizable() {
+            logger.info("--> Linearizability checking history of size: {} for key: {} and initialVersion: {}: {}", history.size(),
+                id, initialVersion, history);
+            LinearizabilityChecker.SequentialSpec spec = new CASSequentialSpec(initialVersion);
+            boolean linearizable = new LinearizabilityChecker().isLinearizable(spec, history, missingResponseGenerator());
+            // implicitly test that we can serialize all histories.
+            String serializedHistory = base64Serialize(history);
+            if (linearizable == false) {
+                // we dump the base64 encoded data, since the nature of this test is that it does not reproduce even with the same seed.
+                logger.error("Linearizability check failed. Spec: {}, initial version: {}, serialized history: {}", spec, initialVersion,
+                    serializedHistory);
+            }
+            return linearizable;
+        }
+
+        public void assertLinearizable() {
+            assertTrue("Must be linearizable", isLinearizable());
+        }
+    }
+
+    private static class CASSequentialSpec implements LinearizabilityChecker.SequentialSpec {
+        private final Version initialVersion;
+
+        private CASSequentialSpec(Version initialVersion) {
+            this.initialVersion = initialVersion;
+        }
+
+        @Override
+        public Object initialState() {
+            return casSuccess(initialVersion);
+        }
+
+        @Override
+        public Optional<Object> nextState(Object currentState, Object input, Object output) {
+            State state = (State) currentState;
+            if (output instanceof IndexResponseHistoryOutput) {
+                if (input.equals(state.safeVersion) ||
+                    (state.lastFailed && ((Version) input).compareTo(state.safeVersion) > 0)) {
+                    return Optional.of(casSuccess(((IndexResponseHistoryOutput) output).getVersion()));
+                } else {
+                    return Optional.empty();
+                }
+            } else {
+                return Optional.of(state.failed());
+            }
+        }
+    }
+
+    private static final class State {
+        private final Version safeVersion;
+        private final boolean lastFailed;
+
+        private State(Version safeVersion, boolean lastFailed) {
+            this.safeVersion = safeVersion;
+            this.lastFailed = lastFailed;
+        }
+
+        public State failed() {
+            return lastFailed ? this : casFail(safeVersion);
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            State that = (State) o;
+            return lastFailed == that.lastFailed &&
+                safeVersion.equals(that.safeVersion);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(safeVersion, lastFailed);
+        }
+
+        @Override
+        public String toString() {
+            return "State{" +
+                "safeVersion=" + safeVersion +
+                ", lastFailed=" + lastFailed +
+                '}';
+        }
+    }
+
+    private static State casFail(Version stateVersion) {
+        return new State(stateVersion, true);
+    }
+
+    private static State casSuccess(Version version1) {
+        return new State(version1, false);
+    }
+
+    /**
+     * HistoryOutput contains the information from the output of calls.
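+     * Implementations are NamedWriteable so that a complete history can be serialized to base64 and replayed offline via main().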
+     */
+    private interface HistoryOutput extends NamedWriteable {
+        Version getVersion();
+    }
+
+    private static class IndexResponseHistoryOutput implements HistoryOutput {
+        private final Version outputVersion;
+
+        private IndexResponseHistoryOutput(IndexResponse response) {
+            this(new Version(response.getPrimaryTerm(), response.getSeqNo()));
+        }
+
+        private IndexResponseHistoryOutput(StreamInput input) throws IOException {
+            this(new Version(input));
+        }
+
+        private IndexResponseHistoryOutput(Version outputVersion) {
+            this.outputVersion = outputVersion;
+        }
+
+        @Override
+        public Version getVersion() {
+            return outputVersion;
+        }
+
+        @Override
+        public String toString() {
+            return "Index{" + outputVersion + "}";
+        }
+
+        @Override
+        public String getWriteableName() {
+            return "index";
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            outputVersion.writeTo(out);
+        }
+    }
+
+    /**
+     * We treat CAS failures (version conflicts) identically to other failures in the linearizability checker, but keep this separate
+     * to parse out the latest observed version and to ease debugging.
+     */
+    private static class CASFailureHistoryOutput implements HistoryOutput {
+        private Version outputVersion;
+
+        private CASFailureHistoryOutput(VersionConflictEngineException exception) {
+            this(parseException(exception.getMessage()));
+        }
+
+        private CASFailureHistoryOutput(StreamInput input) throws IOException {
+            this(new Version(input));
+        }
+
+        private CASFailureHistoryOutput(Version outputVersion) {
+            this.outputVersion = outputVersion;
+        }
+
+        private static Version parseException(String message) {
+            // parsing out the version increases the chance of hitting races against CAS successes, since if we did not parse this out,
+            // no writes would succeed after a failure on our own write (unless we were lucky enough to guess the seqNo/primaryTerm using
+            // the random futureTerm/futureSeqNo handling in CASUpdateThread).
+            try {
+                Matcher matcher = EXTRACT_VERSION.matcher(message);
+                matcher.find();
+                return new Version(Long.parseLong(matcher.group(2)), Long.parseLong(matcher.group(1)));
+            } catch (RuntimeException e) {
+                throw new RuntimeException("Unable to parse message: " + message, e);
+            }
+        }
+
+        @Override
+        public Version getVersion() {
+            return outputVersion;
+        }
+
+        @Override
+        public String toString() {
+            return "CASFail{" + outputVersion + "}";
+        }
+
+        @Override
+        public String getWriteableName() {
+            return "casfail";
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            outputVersion.writeTo(out);
+        }
+    }
+
+    /**
+     * A failure that is not a version conflict.
+     */
+    private static class FailureHistoryOutput implements HistoryOutput {
+
+        private FailureHistoryOutput() {
+        }
+
+        private FailureHistoryOutput(@SuppressWarnings("unused") StreamInput streamInput) {
+        }
+
+        @Override
+        public Version getVersion() {
+            return null;
+        }
+
+        @Override
+        public String toString() {
+            return "Fail";
+        }
+
+        @Override
+        public String getWriteableName() {
+            return "fail";
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            // nothing to write.
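+            // a FailureHistoryOutput carries no state; readers reconstruct it from an empty stream (see the StreamInput constructor).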
+        }
+    }
+
+    private static Function<Object, Object> missingResponseGenerator() {
+        return input -> new FailureHistoryOutput();
+    }
+
+    private String base64Serialize(LinearizabilityChecker.History history) {
+        BytesStreamOutput output = new BytesStreamOutput();
+        try {
+            List<LinearizabilityChecker.Event> events = history.copyEvents();
+            output.writeInt(events.size());
+            for (LinearizabilityChecker.Event event : events) {
+                writeEvent(event, output);
+            }
+            output.close();
+            return Base64.getEncoder().encodeToString(BytesReference.toBytes(output.bytes()));
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private static LinearizabilityChecker.History readHistory(StreamInput input) throws IOException {
+        int size = input.readInt();
+        List<LinearizabilityChecker.Event> events = new ArrayList<>(size);
+        for (int i = 0; i < size; ++i) {
+            events.add(readEvent(input));
+        }
+        return new LinearizabilityChecker.History(events);
+    }
+
+    private static void writeEvent(LinearizabilityChecker.Event event, BytesStreamOutput output) throws IOException {
+        output.writeEnum(event.type);
+        output.writeNamedWriteable((NamedWriteable) event.value);
+        output.writeInt(event.id);
+    }
+
+    private static LinearizabilityChecker.Event readEvent(StreamInput input) throws IOException {
+        return new LinearizabilityChecker.Event(input.readEnum(LinearizabilityChecker.EventType.class),
+            input.readNamedWriteable(NamedWriteable.class), input.readInt());
+    }
+
+    @SuppressForbidden(reason = "system err is ok for a command line tool")
+    public static void main(String[] args) throws Exception {
+        if (args.length < 3) {
+            System.err.println("usage: <file> <primaryTerm> <seqNo>");
+        } else {
+            runLinearizabilityChecker(new FileInputStream(args[0]), Long.parseLong(args[1]), Long.parseLong(args[2]));
+        }
+    }
+
+    @SuppressForbidden(reason = "system out is ok for a command line tool")
+    private static void runLinearizabilityChecker(FileInputStream fileInputStream, long primaryTerm, long seqNo) throws IOException {
+        StreamInput is = new InputStreamStreamInput(Base64.getDecoder().wrap(fileInputStream));
+        is = new NamedWriteableAwareStreamInput(is, createNamedWriteableRegistry());
+
+        LinearizabilityChecker.History history = readHistory(is);
+
+        Version initialVersion = new Version(primaryTerm, seqNo);
+        boolean result =
+            new LinearizabilityChecker().isLinearizable(new CASSequentialSpec(initialVersion), history,
+                missingResponseGenerator());
+
+        System.out.println(LinearizabilityChecker.visualize(new CASSequentialSpec(initialVersion), history,
+            missingResponseGenerator()));
+
+        System.out.println("Linearizable?: " + result);
+    }
+
+    private static NamedWriteableRegistry createNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(Arrays.asList(
+            new NamedWriteableRegistry.Entry(NamedWriteable.class, "version", Version::new),
+            new NamedWriteableRegistry.Entry(NamedWriteable.class, "index", IndexResponseHistoryOutput::new),
+            new NamedWriteableRegistry.Entry(NamedWriteable.class, "casfail", CASFailureHistoryOutput::new),
+            new NamedWriteableRegistry.Entry(NamedWriteable.class, "fail", FailureHistoryOutput::new)
+        ));
+    }
+
+    public void testSequentialSpec() {
+        // Generate 3 increasing versions
+        Version version1 = new Version(randomIntBetween(1, 5), randomIntBetween(0, 100));
+        Version version2 = futureVersion(version1);
+        Version version3 = futureVersion(version2);
+
+        List<Version> versions = Arrays.asList(version1, version2, version3);
+
+        LinearizabilityChecker.SequentialSpec spec = new CASSequentialSpec(version1);
+
+        assertThat(spec.initialState(), equalTo(casSuccess(version1)));
+
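+        // a successful CAS must start from the current safe version, or from any higher version when the previous attempt failed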
assertThat(spec.nextState(casSuccess(version1), version1, new IndexResponseHistoryOutput(version2)), + equalTo(Optional.of(casSuccess(version2)))); + assertThat(spec.nextState(casFail(version1), version2, new IndexResponseHistoryOutput(version3)), + equalTo(Optional.of(casSuccess(version3)))); + assertThat(spec.nextState(casSuccess(version1), version2, new IndexResponseHistoryOutput(version3)), + equalTo(Optional.empty())); + assertThat(spec.nextState(casSuccess(version2), version1, new IndexResponseHistoryOutput(version3)), + equalTo(Optional.empty())); + assertThat(spec.nextState(casFail(version2), version1, new IndexResponseHistoryOutput(version3)), + equalTo(Optional.empty())); + + // for version conflicts, we keep state version with lastFailed set, regardless of input/output version. + versions.forEach(stateVersion -> + versions.forEach(inputVersion -> + versions.forEach(outputVersion -> { + assertThat(spec.nextState(casSuccess(stateVersion), inputVersion, new CASFailureHistoryOutput(outputVersion)), + equalTo(Optional.of(casFail(stateVersion)))); + assertThat(spec.nextState(casFail(stateVersion), inputVersion, new CASFailureHistoryOutput(outputVersion)), + equalTo(Optional.of(casFail(stateVersion)))); + }) + ) + ); + + // for non version conflict failures, we keep state version with lastFailed set, regardless of input version. + versions.forEach(stateVersion -> + versions.forEach(inputVersion -> { + assertThat(spec.nextState(casSuccess(stateVersion), inputVersion, new FailureHistoryOutput()), + equalTo(Optional.of(casFail(stateVersion)))); + assertThat(spec.nextState(casFail(stateVersion), inputVersion, new FailureHistoryOutput()), + equalTo(Optional.of(casFail(stateVersion)))); + }) + ); + } + + private Version futureVersion(Version version) { + Version futureVersion = version.nextSeqNo(randomIntBetween(1,10)); + if (randomBoolean()) + futureVersion = futureVersion.nextTerm(); + return futureVersion; + } + } diff --git a/settings.gradle b/settings.gradle index a05e0bf0003..7532230e7b8 100644 --- a/settings.gradle +++ b/settings.gradle @@ -29,6 +29,8 @@ List projects = [ 'distribution:archives:oss-no-jdk-linux-tar', 'distribution:archives:no-jdk-linux-tar', 'distribution:docker', + 'distribution:docker:oss-docker-build-context', + 'distribution:docker:docker-build-context', 'distribution:packages:oss-deb', 'distribution:packages:deb', 'distribution:packages:oss-no-jdk-deb', diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndTerm.java b/test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndSource.java similarity index 58% rename from test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndTerm.java rename to test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndSource.java index b24a010c1a0..a48e813c1a6 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndTerm.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/DocIdSeqNoAndSource.java @@ -20,24 +20,34 @@ package org.elasticsearch.index.engine; +import org.apache.lucene.util.BytesRef; + import java.util.Objects; -/** A tuple of document id, sequence number and primary term of a document */ -public final class DocIdSeqNoAndTerm { +/** A tuple of document id, sequence number, primary term, source and version of a document */ +public final class DocIdSeqNoAndSource { private final String id; + private final BytesRef source; private final long seqNo; private final long primaryTerm; + private final 
long version; - public DocIdSeqNoAndTerm(String id, long seqNo, long primaryTerm) { + public DocIdSeqNoAndSource(String id, BytesRef source, long seqNo, long primaryTerm, long version) { this.id = id; + this.source = source; this.seqNo = seqNo; this.primaryTerm = primaryTerm; + this.version = version; } public String getId() { return id; } + public BytesRef getSource() { + return source; + } + public long getSeqNo() { return seqNo; } @@ -46,21 +56,27 @@ public final class DocIdSeqNoAndTerm { return primaryTerm; } + public long getVersion() { + return version; + } + @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - DocIdSeqNoAndTerm that = (DocIdSeqNoAndTerm) o; - return Objects.equals(id, that.id) && seqNo == that.seqNo && primaryTerm == that.primaryTerm; + DocIdSeqNoAndSource that = (DocIdSeqNoAndSource) o; + return Objects.equals(id, that.id) && Objects.equals(source, that.source) + && seqNo == that.seqNo && primaryTerm == that.primaryTerm && version == that.version; } @Override public int hashCode() { - return Objects.hash(id, seqNo, primaryTerm); + return Objects.hash(id, source, seqNo, primaryTerm, version); } @Override public String toString() { - return "DocIdSeqNoAndTerm{" + "id='" + id + " seqNo=" + seqNo + " primaryTerm=" + primaryTerm + "}"; + return "doc{" + "id='" + id + " seqNo=" + seqNo + " primaryTerm=" + primaryTerm + + " version=" + version + " source= " + (source != null ? source.utf8ToString() : null) + "}"; } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index cdd4199c482..aebda583906 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -63,6 +63,7 @@ import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -995,16 +996,17 @@ public abstract class EngineTestCase extends ESTestCase { /** * Gets a collection of tuples of docId, sequence number, and primary term of all live documents in the provided engine. 
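+     * Each entry now also carries the document's source and version.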
     */
-    public static List<DocIdSeqNoAndTerm> getDocIds(Engine engine, boolean refresh) throws IOException {
+    public static List<DocIdSeqNoAndSource> getDocIds(Engine engine, boolean refresh) throws IOException {
         if (refresh) {
             engine.refresh("test_get_doc_ids");
         }
         try (Engine.Searcher searcher = engine.acquireSearcher("test_get_doc_ids")) {
-            List<DocIdSeqNoAndTerm> docs = new ArrayList<>();
+            List<DocIdSeqNoAndSource> docs = new ArrayList<>();
             for (LeafReaderContext leafContext : searcher.reader().leaves()) {
                 LeafReader reader = leafContext.reader();
                 NumericDocValues seqNoDocValues = reader.getNumericDocValues(SeqNoFieldMapper.NAME);
                 NumericDocValues primaryTermDocValues = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
+                NumericDocValues versionDocValues = reader.getNumericDocValues(VersionFieldMapper.NAME);
                 Bits liveDocs = reader.getLiveDocs();
                 for (int i = 0; i < reader.maxDoc(); i++) {
                     if (liveDocs == null || liveDocs.get(i)) {
@@ -1013,20 +1015,25 @@ public abstract class EngineTestCase extends ESTestCase {
                             continue;
                         }
                         final long primaryTerm = primaryTermDocValues.longValue();
-                        Document uuid = reader.document(i, Collections.singleton(IdFieldMapper.NAME));
-                        BytesRef binaryID = uuid.getBinaryValue(IdFieldMapper.NAME);
+                        Document doc = reader.document(i, Sets.newHashSet(IdFieldMapper.NAME, SourceFieldMapper.NAME));
+                        BytesRef binaryID = doc.getBinaryValue(IdFieldMapper.NAME);
                         String id = Uid.decodeId(Arrays.copyOfRange(binaryID.bytes, binaryID.offset, binaryID.offset + binaryID.length));
+                        final BytesRef source = doc.getBinaryValue(SourceFieldMapper.NAME);
                         if (seqNoDocValues.advanceExact(i) == false) {
                             throw new AssertionError("seqNoDocValues not found for doc[" + i + "] id[" + id + "]");
                         }
                         final long seqNo = seqNoDocValues.longValue();
-                        docs.add(new DocIdSeqNoAndTerm(id, seqNo, primaryTerm));
+                        if (versionDocValues.advanceExact(i) == false) {
+                            throw new AssertionError("versionDocValues not found for doc[" + i + "] id[" + id + "]");
+                        }
+                        final long version = versionDocValues.longValue();
+                        docs.add(new DocIdSeqNoAndSource(id, source, seqNo, primaryTerm, version));
                     }
                 }
             }
-            docs.sort(Comparator.comparingLong(DocIdSeqNoAndTerm::getSeqNo)
-                .thenComparingLong(DocIdSeqNoAndTerm::getPrimaryTerm)
-                .thenComparing((DocIdSeqNoAndTerm::getId)));
+            docs.sort(Comparator.comparingLong(DocIdSeqNoAndSource::getSeqNo)
+                .thenComparingLong(DocIdSeqNoAndSource::getPrimaryTerm)
+                .thenComparing((DocIdSeqNoAndSource::getId)));
             return docs;
         }
     }
 }
diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
index e3fa9351497..d88cdf488fb 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
@@ -65,7 +65,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.engine.DocIdSeqNoAndTerm;
+import org.elasticsearch.index.engine.DocIdSeqNoAndSource;
 import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.engine.InternalEngineFactory;
 import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
@@ -479,7 +479,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
         if (closed == false) {
             closed = true;
             try {
-                final List<DocIdSeqNoAndTerm>
docsOnPrimary = getDocIdAndSeqNos(primary); + final List docsOnPrimary = getDocIdAndSeqNos(primary); for (IndexShard replica : replicas) { assertThat(replica.getMaxSeenAutoIdTimestamp(), equalTo(primary.getMaxSeenAutoIdTimestamp())); assertThat(replica.getMaxSeqNoOfUpdatesOrDeletes(), greaterThanOrEqualTo(primary.getMaxSeqNoOfUpdatesOrDeletes())); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 5b5ff8de01d..105ec5415d6 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -51,7 +51,7 @@ import org.elasticsearch.index.MapperTestUtils; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.cache.query.DisabledQueryCache; -import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; +import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineTestCase; @@ -703,10 +703,10 @@ public abstract class IndexShardTestCase extends ESTestCase { } public static Set getShardDocUIDs(final IndexShard shard) throws IOException { - return getDocIdAndSeqNos(shard).stream().map(DocIdSeqNoAndTerm::getId).collect(Collectors.toSet()); + return getDocIdAndSeqNos(shard).stream().map(DocIdSeqNoAndSource::getId).collect(Collectors.toSet()); } - public static List getDocIdAndSeqNos(final IndexShard shard) throws IOException { + public static List getDocIdAndSeqNos(final IndexShard shard) throws IOException { return EngineTestCase.getDocIds(shard.getEngine(), true); } @@ -721,6 +721,9 @@ public abstract class IndexShardTestCase extends ESTestCase { } public static void assertConsistentHistoryBetweenTranslogAndLucene(IndexShard shard) throws IOException { + if (shard.state() != IndexShardState.POST_RECOVERY && shard.state() != IndexShardState.STARTED) { + return; + } final Engine engine = shard.getEngineOrNull(); if (engine != null) { EngineTestCase.assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, shard.mapperService()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 0f1a3fb3dcb..38afff7552a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -194,6 +194,10 @@ public abstract class AbstractBuilderTestCase extends ESTestCase { return ALIAS_TO_CONCRETE_FIELD_NAME.getOrDefault(builderFieldName, builderFieldName); } + protected Iterable getMapping() { + return serviceHolder.mapperService.fieldTypes(); + } + @AfterClass public static void afterClass() throws Exception { IOUtils.close(serviceHolder); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireTestCase.java index bcd0c7cd3a7..3e58dc8809d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireTestCase.java @@ -54,7 +54,7 @@ public abstract class AbstractWireTestCase extends ESTestCase { /** * Tests that the equals 
and hashcode methods are consistent and copied - * versions of the instance have are equal. + * versions of the instance are equal. */ public final void testEqualsAndHashcode() { for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index c41a0fdcbef..679280ee2bd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -85,7 +85,7 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.CommitStats; -import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; +import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; @@ -1455,7 +1455,7 @@ public final class InternalTestCluster extends TestCluster { if (primaryShard == null) { continue; } - final List docsOnPrimary; + final List docsOnPrimary; try { docsOnPrimary = IndexShardTestCase.getDocIdAndSeqNos(primaryShard); } catch (AlreadyClosedException ex) { @@ -1466,7 +1466,7 @@ public final class InternalTestCluster extends TestCluster { if (replicaShard == null) { continue; } - final List docsOnReplica; + final List docsOnReplica; try { docsOnReplica = IndexShardTestCase.getDocIdAndSeqNos(replicaShard); } catch (AlreadyClosedException ex) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java index 22a12e74c35..c6a5d77faf5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java @@ -159,7 +159,7 @@ public class MockLogAppender extends AbstractAppender { } - public static class PatternSeenEventExcpectation implements LoggingExpectation { + public static class PatternSeenEventExpectation implements LoggingExpectation { protected final String name; protected final String logger; @@ -167,7 +167,7 @@ public class MockLogAppender extends AbstractAppender { protected final String pattern; volatile boolean saw; - public PatternSeenEventExcpectation(String name, String logger, Level level, String pattern) { + public PatternSeenEventExpectation(String name, String logger, Level level, String pattern) { this.name = name; this.logger = logger; this.level = level; diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index b1a4c42cbfd..054b126b4ef 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -82,8 +82,11 @@ public class ReproduceInfoPrinter extends RunListener { b.append(task); b.append(" --tests \""); b.append(failure.getDescription().getClassName()); - b.append("."); - b.append(failure.getDescription().getMethodName()); + final String methodName = failure.getDescription().getMethodName(); + if (methodName != null) { + b.append("."); + 
b.append(failure.getDescription().getMethodName()); + } b.append("\""); GradleMessageBuilder gradleMessageBuilder = new GradleMessageBuilder(b); diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java index a9c70deaaea..28c4de9ee4b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java @@ -208,7 +208,7 @@ public class MockTransport implements Transport, LifecycleComponent { } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) { + public TransportAddress[] addressesFromString(String address) { return new TransportAddress[0]; } @@ -238,7 +238,7 @@ public class MockTransport implements Transport, LifecycleComponent { } @Override - public List getLocalAddresses() { + public List getDefaultSeedAddresses() { return Collections.emptyList(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 408748d4193..d7aa6e5903e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -68,6 +69,7 @@ import java.util.Map; import java.util.Queue; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.LinkedBlockingDeque; import java.util.concurrent.TimeUnit; import java.util.function.Function; @@ -307,26 +309,38 @@ public final class MockTransportService extends TransportService { Supplier delaySupplier = () -> new TimeValue(duration.millis() - (System.currentTimeMillis() - startTime)); - transport().addConnectBehavior(transportAddress, (transport, discoveryNode, profile, listener) -> { - TimeValue delay = delaySupplier.get(); - if (delay.millis() <= 0) { - return original.openConnection(discoveryNode, profile, listener); + transport().addConnectBehavior(transportAddress, new StubbableTransport.OpenConnectionBehavior() { + private CountDownLatch stopLatch = new CountDownLatch(1); + @Override + public Releasable openConnection(Transport transport, DiscoveryNode discoveryNode, + ConnectionProfile profile, ActionListener listener) { + TimeValue delay = delaySupplier.get(); + if (delay.millis() <= 0) { + return original.openConnection(discoveryNode, profile, listener); + } + + // TODO: Replace with proper setting + TimeValue connectingTimeout = TransportSettings.CONNECT_TIMEOUT.getDefault(Settings.EMPTY); + try { + if (delay.millis() < connectingTimeout.millis()) { + stopLatch.await(delay.millis(), TimeUnit.MILLISECONDS); + return original.openConnection(discoveryNode, profile, listener); + } else { + stopLatch.await(connectingTimeout.millis(), TimeUnit.MILLISECONDS); + listener.onFailure(new 
ConnectTransportException(discoveryNode, "UNRESPONSIVE: simulated")); + return () -> { + }; + } + } catch (InterruptedException e) { + listener.onFailure(new ConnectTransportException(discoveryNode, "UNRESPONSIVE: simulated")); + return () -> { + }; + } } - // TODO: Replace with proper setting - TimeValue connectingTimeout = TransportSettings.CONNECT_TIMEOUT.getDefault(Settings.EMPTY); - try { - if (delay.millis() < connectingTimeout.millis()) { - Thread.sleep(delay.millis()); - return original.openConnection(discoveryNode, profile, listener); - } else { - Thread.sleep(connectingTimeout.millis()); - listener.onFailure(new ConnectTransportException(discoveryNode, "UNRESPONSIVE: simulated")); - return () -> {}; - } - } catch (InterruptedException e) { - listener.onFailure(new ConnectTransportException(discoveryNode, "UNRESPONSIVE: simulated")); - return () -> {}; + @Override + public void clearCallback() { + stopLatch.countDown(); } }); diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java index 4ccc352158a..d812fdffe96 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java @@ -87,7 +87,10 @@ public final class StubbableTransport implements Transport { if (behavior != null) { behavior.clearCallback(); } - connectBehaviors.remove(transportAddress); + OpenConnectionBehavior openConnectionBehavior = connectBehaviors.remove(transportAddress); + if (openConnectionBehavior != null) { + openConnectionBehavior.clearCallback(); + } } Transport getDelegate() { @@ -115,13 +118,13 @@ public final class StubbableTransport implements Transport { } @Override - public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { - return delegate.addressesFromString(address, perAddressLimit); + public TransportAddress[] addressesFromString(String address) throws UnknownHostException { + return delegate.addressesFromString(address); } @Override - public List getLocalAddresses() { - return delegate.getLocalAddresses(); + public List getDefaultSeedAddresses() { + return delegate.getDefaultSeedAddresses(); } @Override @@ -246,6 +249,8 @@ public final class StubbableTransport implements Transport { Releasable openConnection(Transport transport, DiscoveryNode discoveryNode, ConnectionProfile profile, ActionListener listener); + + default void clearCallback() {} } @FunctionalInterface diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 14d9659c1c2..0985054f5b6 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1053,19 +1053,19 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { final String requestSent = ".*\\[internal:test].*sent to.*\\{TS_B}.*"; final MockLogAppender.LoggingExpectation requestSentExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "sent request", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, requestSent); final String requestReceived = ".*\\[internal:test].*received 
request.*"; final MockLogAppender.LoggingExpectation requestReceivedExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "received request", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, requestReceived); final String responseSent = ".*\\[internal:test].*sent response.*"; final MockLogAppender.LoggingExpectation responseSentExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "sent response", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, responseSent); final String responseReceived = ".*\\[internal:test].*received response from.*\\{TS_B}.*"; final MockLogAppender.LoggingExpectation responseReceivedExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "received response", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, responseReceived); appender.addExpectation(requestSentExpectation); @@ -1080,12 +1080,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { final String errorResponseSent = ".*\\[internal:testError].*sent error response.*"; final MockLogAppender.LoggingExpectation errorResponseSentExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "sent error response", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, errorResponseSent); final String errorResponseReceived = ".*\\[internal:testError].*received response from.*\\{TS_B}.*"; final MockLogAppender.LoggingExpectation errorResponseReceivedExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "received error response", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, errorResponseReceived); appender.addExpectation(errorResponseSentExpectation); @@ -1101,7 +1101,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { "not seen request sent", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, notSeenSent); final String notSeenReceived = ".*\\[internal:testNotSeen].*received request.*"; final MockLogAppender.LoggingExpectation notSeenReceivedExpectation = - new MockLogAppender.PatternSeenEventExcpectation( + new MockLogAppender.PatternSeenEventExpectation( "not seen request received", "org.elasticsearch.transport.TransportService.tracer", Level.TRACE, notSeenReceived); appender.addExpectation(notSeenSentExpectation); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index abb92979f8d..39316ca9192 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -37,8 +37,8 @@ import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.BytesWriteHandler; import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; -import org.elasticsearch.nio.NioSelectorGroup; import org.elasticsearch.nio.NioSelector; +import org.elasticsearch.nio.NioSelectorGroup; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.Page; @@ -61,7 +61,7 @@ import java.util.HashSet; import java.util.Set; import 
java.util.concurrent.ConcurrentMap; import java.util.function.Consumer; -import java.util.function.Supplier; +import java.util.function.IntFunction; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; @@ -192,9 +192,13 @@ public class MockNioTransport extends TcpTransport { @Override public MockSocketChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { MockSocketChannel nioChannel = new MockSocketChannel(isClient == false, profileName, channel); - Supplier pageSupplier = () -> { - Recycler.V bytes = pageCacheRecycler.bytePage(false); - return new Page(ByteBuffer.wrap(bytes.v()), bytes::close); + IntFunction pageSupplier = (length) -> { + if (length > PageCacheRecycler.BYTE_PAGE_SIZE) { + return new Page(ByteBuffer.allocate(length), () -> {}); + } else { + Recycler.V bytes = pageCacheRecycler.bytePage(false); + return new Page(ByteBuffer.wrap(bytes.v(), 0, length), bytes::close); + } }; MockTcpReadWriteHandler readWriteHandler = new MockTcpReadWriteHandler(nioChannel, MockNioTransport.this); BytesChannelContext context = new BytesChannelContext(nioChannel, selector, (e) -> exceptionCaught(nioChannel, e), diff --git a/x-pack/docs/en/watcher/java.asciidoc b/x-pack/docs/en/watcher/java.asciidoc index e5cb6b54b0c..7224196834f 100644 --- a/x-pack/docs/en/watcher/java.asciidoc +++ b/x-pack/docs/en/watcher/java.asciidoc @@ -60,6 +60,7 @@ repositories { // Add the Elasticsearch Maven Repository maven { + name "elastic" url "https://artifacts.elastic.co/maven" } } diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index dbe1cdf51ef..a808a7197cc 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -59,10 +59,6 @@ dependencyLicenses { ignoreSha 'x-pack-core' } -run { - plugin xpackModule('core') -} - testingConventions.naming { IT { baseClass "org.elasticsearch.xpack.CcrIntegTestCase" diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index fd84725e4bd..dea3da2a3ba 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -49,7 +49,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.engine.DocIdSeqNoAndTerm; +import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -490,13 +490,13 @@ public abstract class CcrIntegTestCase extends ESTestCase { protected void assertIndexFullyReplicatedToFollower(String leaderIndex, String followerIndex) throws Exception { logger.info("--> asserting <> between {} and {}", leaderIndex, followerIndex); assertBusy(() -> { - Map> docsOnFollower = getDocIdAndSeqNos(clusterGroup.followerCluster, followerIndex); - Map> docsOnLeader = getDocIdAndSeqNos(clusterGroup.leaderCluster, leaderIndex); - Map> mismatchedDocs = new HashMap<>(); - for (Map.Entry> fe : docsOnFollower.entrySet()) { - Set d1 = Sets.difference( + Map> docsOnFollower = getDocIdAndSeqNos(clusterGroup.followerCluster, followerIndex); + Map> 
docsOnLeader = getDocIdAndSeqNos(clusterGroup.leaderCluster, leaderIndex);
+            Map<Integer, Set<DocIdSeqNoAndSource>> mismatchedDocs = new HashMap<>();
+            for (Map.Entry<Integer, List<DocIdSeqNoAndSource>> fe : docsOnFollower.entrySet()) {
+                Set<DocIdSeqNoAndSource> d1 = Sets.difference(
                     Sets.newHashSet(fe.getValue()), Sets.newHashSet(docsOnLeader.getOrDefault(fe.getKey(), Collections.emptyList())));
-                Set<DocIdSeqNoAndTerm> d2 = Sets.difference(
+                Set<DocIdSeqNoAndSource> d2 = Sets.difference(
                     Sets.newHashSet(docsOnLeader.getOrDefault(fe.getKey(), Collections.emptyList())), Sets.newHashSet(fe.getValue()));
                 if (d1.isEmpty() == false || d2.isEmpty() == false) {
                     mismatchedDocs.put(fe.getKey(), Sets.union(d1, d2));
@@ -525,11 +525,11 @@ public abstract class CcrIntegTestCase extends ESTestCase {
         }, 120, TimeUnit.SECONDS);
     }

-    private Map<Integer, List<DocIdSeqNoAndTerm>> getDocIdAndSeqNos(InternalTestCluster cluster, String index) throws IOException {
+    private Map<Integer, List<DocIdSeqNoAndSource>> getDocIdAndSeqNos(InternalTestCluster cluster, String index) throws IOException {
         final ClusterState state = cluster.client().admin().cluster().prepareState().get().getState();
         List<ShardRouting> shardRoutings = state.routingTable().allShards(index);
         Randomness.shuffle(shardRoutings);
-        final Map<Integer, List<DocIdSeqNoAndTerm>> docs = new HashMap<>();
+        final Map<Integer, List<DocIdSeqNoAndSource>> docs = new HashMap<>();
         for (ShardRouting shardRouting : shardRoutings) {
             if (shardRouting == null || shardRouting.assignedToNode() == false) {
                 continue;
@@ -537,14 +537,14 @@ public abstract class CcrIntegTestCase extends ESTestCase {
             IndexShard indexShard = cluster.getInstance(IndicesService.class, state.nodes().get(shardRouting.currentNodeId()).getName())
                 .indexServiceSafe(shardRouting.index()).getShard(shardRouting.id());
             try {
-                final List<DocIdSeqNoAndTerm> docsOnShard = IndexShardTestCase.getDocIdAndSeqNos(indexShard);
+                final List<DocIdSeqNoAndSource> docsOnShard = IndexShardTestCase.getDocIdAndSeqNos(indexShard);
                 logger.info("--> shard {} docs {} seq_no_stats {}", shardRouting, docsOnShard, indexShard.seqNoStats());
                 docs.put(shardRouting.shardId().id(), docsOnShard.stream()
                     // normalize primary term as the follower uses its own term
-                    .map(d -> new DocIdSeqNoAndTerm(d.getId(), d.getSeqNo(), 1L))
+                    .map(d -> new DocIdSeqNoAndSource(d.getId(), d.getSource(), d.getSeqNo(), 1L, d.getVersion()))
                     .collect(Collectors.toList()));
             } catch (AlreadyClosedException e) {
-                // Ignore this exception and try getting List<DocIdSeqNoAndTerm> from other IndexShard instance.
+                // Ignore this exception and try getting List<DocIdSeqNoAndSource> from other IndexShard instance.
} } return docs; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index 86cea446b00..2931fba6f09 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -44,6 +44,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteTransportException; @@ -87,6 +88,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; +@TestLogging(value = "org.elasticsearch.xpack.ccr:trace") public class CcrRetentionLeaseIT extends CcrIntegTestCase { public static final class RetentionLeaseRenewIntervalSettingPlugin extends Plugin { @@ -614,7 +616,6 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39509") public void testRetentionLeaseRenewalIsCancelledWhenFollowingIsPaused() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; @@ -921,7 +922,6 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { } } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/39850") public void testForgetFollower() throws Exception { final String leaderIndex = "leader"; final String followerIndex = "follower"; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index f729162c545..a4aeae12926 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -620,7 +620,7 @@ public class IndexFollowingIT extends CcrIntegTestCase { assertThat(response.getStatsResponses().get(0).status().readExceptions().size(), equalTo(1)); ElasticsearchException exception = response.getStatsResponses().get(0).status() .readExceptions().entrySet().iterator().next().getValue().v2(); - assertThat(exception.getRootCause().getMessage(), equalTo("blocked by: [FORBIDDEN/4/index closed];")); + assertThat(exception.getRootCause().getMessage(), equalTo("index [index1] blocked by: [FORBIDDEN/4/index closed];")); }); leaderClient().admin().indices().open(new OpenIndexRequest("index1")).actionGet(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index b298124e419..4a56d6370eb 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.codec.CodecService; -import 
org.elasticsearch.index.engine.DocIdSeqNoAndTerm; +import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineTestCase; @@ -621,7 +621,7 @@ public class FollowingEngineTests extends ESTestCase { assertThat(failure.getExistingPrimaryTerm().getAsLong(), equalTo(operationWithTerms.get(op.seqNo()))); } } - for (DocIdSeqNoAndTerm docId : getDocIds(followingEngine, true)) { + for (DocIdSeqNoAndSource docId : getDocIds(followingEngine, true)) { assertThat(docId.getPrimaryTerm(), equalTo(operationWithTerms.get(docId.getSeqNo()))); } // Replica should accept duplicates @@ -633,7 +633,7 @@ public class FollowingEngineTests extends ESTestCase { Engine.Result result = applyOperation(followingEngine, op, newTerm, nonPrimary); assertThat(result.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); } - for (DocIdSeqNoAndTerm docId : getDocIds(followingEngine, true)) { + for (DocIdSeqNoAndSource docId : getDocIds(followingEngine, true)) { assertThat(docId.getPrimaryTerm(), equalTo(operationWithTerms.get(docId.getSeqNo()))); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java index 3c852a3ae40..1177b1d0318 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/common/network/InetAddressHelper.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.common.network; +import java.io.IOException; import java.net.InetAddress; -import java.net.SocketException; /** * We use this class to access the package private method in NetworkUtils to resolve anyLocalAddress InetAddresses for certificate @@ -16,7 +16,7 @@ public class InetAddressHelper { private InetAddressHelper() {} - public static InetAddress[] getAllAddresses() throws SocketException { + public static InetAddress[] getAllAddresses() throws IOException { return NetworkUtils.getAllAddresses(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 51b8894a54e..131069d27f6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -95,13 +95,18 @@ public class XPackLicenseState { switch (newMode) { case BASIC: switch (currentMode) { - case TRIAL: case STANDARD: + return new String[] { + "Security will default to disabled (set " + XPackSettings.SECURITY_ENABLED.getKey() + " to enable security).", + }; + case TRIAL: case GOLD: case PLATINUM: return new String[] { - "The following X-Pack security functionality will be disabled: authentication, authorization, " + - "ip filtering, and auditing. Please restart your node after applying the license.", + "Security will default to disabled (set " + XPackSettings.SECURITY_ENABLED.getKey() + " to enable security).", + "Authentication will be limited to the native and file realms.", + "Security tokens and API keys will not be supported.", + "IP filtering and auditing will be disabled.", "Field and document level access control will be disabled.", "Custom realms will be ignored.", "A custom authorization engine will be ignored." 
@@ -125,7 +130,7 @@ public class XPackLicenseState { case STANDARD: switch (currentMode) { case BASIC: - // ^^ though technically it was already disabled, it's not bad to remind them + // ^^ though technically it doesn't change the feature set, it's not bad to remind them case GOLD: case PLATINUM: case TRIAL: diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java index e027191d8a5..d31892692a5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java @@ -21,8 +21,9 @@ public class DataFrameMessages { "Failed to validate data frame configuration"; public static final String REST_PUT_DATA_FRAME_FAILED_PERSIST_TRANSFORM_CONFIGURATION = "Failed to persist data frame configuration"; public static final String REST_PUT_DATA_FRAME_FAILED_TO_DEDUCE_DEST_MAPPINGS = "Failed to deduce dest mappings"; - public static final String REST_PUT_DATA_FRAME_FAILED_TO_CREATE_DEST_INDEX = "Failed to create dest index"; public static final String REST_PUT_DATA_FRAME_SOURCE_INDEX_MISSING = "Source index [{0}] does not exist"; + public static final String REST_PUT_DATA_FRAME_DEST_IN_SOURCE = "Destination index [{0}] is included in source expression [{1}]"; + public static final String REST_PUT_DATA_FRAME_DEST_SINGLE_INDEX = "Destination index [{0}] should refer to a single index"; public static final String REST_PUT_DATA_FRAME_INCONSISTENT_ID = "Inconsistent id; ''{0}'' specified in the body differs from ''{1}'' specified as a URL argument"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java index e7a43f252d6..99699c3a48c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java @@ -5,8 +5,10 @@ */ package org.elasticsearch.xpack.core.dataframe.action; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.common.Nullable; @@ -24,6 +26,7 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -167,6 +170,13 @@ public class StopDataFrameTransformAction extends Action taskFailures, + List nodeFailures, + boolean stopped) { + super(taskFailures, nodeFailures); + this.stopped = stopped; + } + public boolean isStopped() { return stopped; } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpoint.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java similarity index 99% rename from 
x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpoint.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java index f5d2dbad693..8b1cf8b42e3 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpoint.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.core.dataframe.transforms; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java index 670b1009d29..981a56e639d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java @@ -39,7 +39,7 @@ public class QueryConfig extends AbstractDiffable implements Writea private final Map source; private final QueryBuilder query; - static QueryConfig matchAll() { + public static QueryConfig matchAll() { return new QueryConfig(Collections.singletonMap(MatchAllQueryBuilder.NAME, Collections.emptyMap()), new MatchAllQueryBuilder()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java index 636a3978443..ec7e0de9e34 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -313,6 +313,7 @@ public abstract class AsyncTwoPhaseIndexer doSaveState(finishAndSetState(), position.get(), () -> {}), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/oidc/OpenIdConnectRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/oidc/OpenIdConnectRealmSettings.java index b88056a4f24..4459a1d866e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/oidc/OpenIdConnectRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/oidc/OpenIdConnectRealmSettings.java @@ -76,8 +76,6 @@ public class OpenIdConnectRealmSettings { RealmSettings.realmSettingPrefix(TYPE), "rp.requested_scopes", key -> Setting.listSetting(key, Collections.singletonList("openid"), Function.identity(), Setting.Property.NodeScope)); - public static final Setting.AffixSetting OP_NAME - = RealmSettings.simpleString(TYPE, "op.name", Setting.Property.NodeScope); public static final Setting.AffixSetting OP_AUTHORIZATION_ENDPOINT = Setting.affixKeySetting(RealmSettings.realmSettingPrefix(TYPE), "op.authorization_endpoint", key -> Setting.simpleString(key, v -> { @@ -151,7 +149,7 @@ public class OpenIdConnectRealmSettings { public static Set> getSettings() { final Set> set = Sets.newHashSet( RP_CLIENT_ID, RP_REDIRECT_URI, RP_RESPONSE_TYPE, RP_REQUESTED_SCOPES, RP_CLIENT_SECRET, 
RP_SIGNATURE_ALGORITHM, - RP_POST_LOGOUT_REDIRECT_URI, OP_NAME, OP_AUTHORIZATION_ENDPOINT, OP_TOKEN_ENDPOINT, OP_USERINFO_ENDPOINT, + RP_POST_LOGOUT_REDIRECT_URI, OP_AUTHORIZATION_ENDPOINT, OP_TOKEN_ENDPOINT, OP_USERINFO_ENDPOINT, OP_ENDSESSION_ENDPOINT, OP_ISSUER, OP_JWKSET_PATH, HTTP_CONNECT_TIMEOUT, HTTP_CONNECTION_READ_TIMEOUT, HTTP_SOCKET_TIMEOUT, HTTP_MAX_CONNECTIONS, HTTP_MAX_ENDPOINT_CONNECTIONS, ALLOWED_CLOCK_SKEW); set.addAll(DelegatedAuthorizationSettings.getSettings(TYPE)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index 9fcec79c453..bc8d7817f4d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -62,6 +62,10 @@ public class XPackLicenseStateTests extends ESTestCase { return randomFrom(TRIAL, PLATINUM); } + public static OperationMode randomTrialGoldOrPlatinumMode() { + return randomFrom(TRIAL, GOLD, PLATINUM); + } + public static OperationMode randomTrialBasicStandardGoldOrPlatinumMode() { return randomFrom(TRIAL, BASIC, STANDARD, GOLD, PLATINUM); } @@ -263,8 +267,12 @@ public class XPackLicenseStateTests extends ESTestCase { assertAckMesssages(XPackField.SECURITY, randomMode(), randomTrialOrPlatinumMode(), 0); } - public void testSecurityAckTrialStandardGoldOrPlatinumToBasic() { - assertAckMesssages(XPackField.SECURITY, randomTrialStandardGoldOrPlatinumMode(), BASIC, 4); + public void testSecurityAckTrialGoldOrPlatinumToBasic() { + assertAckMesssages(XPackField.SECURITY, randomTrialGoldOrPlatinumMode(), BASIC, 7); + } + + public void testSecurityAckStandardToBasic() { + assertAckMesssages(XPackField.SECURITY, STANDARD, BASIC, 1); } public void testSecurityAckAnyToStandard() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractSerializingDataFrameTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractSerializingDataFrameTestCase.java new file mode 100644 index 00000000000..8b633cdfc26 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractSerializingDataFrameTestCase.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
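
// Editor's note: the AbstractSerializingDataFrameTestCase whose header appears above (body
// follows) centralizes the NamedWriteableRegistry/NamedXContentRegistry setup that several
// data frame request tests previously duplicated. A minimal sketch of a concrete test built
// on it; the class name is hypothetical, and the fromXContent(parser, id) helper and the
// Request(config) constructor are assumed for illustration, mirroring the tests updated
// later in this diff.
public class ExampleDataFrameRequestTests extends AbstractSerializingDataFrameTestCase<PutDataFrameTransformAction.Request> {

    @Override
    protected PutDataFrameTransformAction.Request createTestInstance() {
        // random config factory as imported by the real tests in this diff (assumed name)
        return new PutDataFrameTransformAction.Request(DataFrameTransformConfigTests.randomDataFrameTransformConfig());
    }

    @Override
    protected Writeable.Reader<PutDataFrameTransformAction.Request> instanceReader() {
        return PutDataFrameTransformAction.Request::new;
    }

    @Override
    protected PutDataFrameTransformAction.Request doParseInstance(XContentParser parser) throws IOException {
        return PutDataFrameTransformAction.Request.fromXContent(parser, "example-transform"); // assumed helper
    }
}
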
+ */ + +package org.elasticsearch.xpack.core.dataframe.action; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.junit.Before; + +import java.util.List; + +import static java.util.Collections.emptyList; + +public abstract class AbstractSerializingDataFrameTestCase + extends AbstractSerializingTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + private NamedXContentRegistry namedXContentRegistry; + + @Before + public void registerNamedObjects() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); + + List namedWriteables = searchModule.getNamedWriteables(); + List namedXContents = searchModule.getNamedXContents(); + + namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); + namedXContentRegistry = new NamedXContentRegistry(namedXContents); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return namedWriteableRegistry; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return namedXContentRegistry; + } + +} \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractWireSerializingDataFrameTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractWireSerializingDataFrameTestCase.java index e092bbbb768..91a7ec54dd2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractWireSerializingDataFrameTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractWireSerializingDataFrameTestCase.java @@ -14,6 +14,8 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.junit.Before; +import java.util.List; + import static java.util.Collections.emptyList; public abstract class AbstractWireSerializingDataFrameTestCase extends AbstractWireSerializingTestCase { @@ -24,12 +26,14 @@ public abstract class AbstractWireSerializingDataFrameTestCase namedWriteables = searchModule.getNamedWriteables(); + List namedXContents = searchModule.getNamedXContents(); + + namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); + namedXContentRegistry = new NamedXContentRegistry(namedXContents); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java index 267725d9a69..0cfc659e506 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java @@ -7,48 +7,20 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; 
import org.elasticsearch.common.xcontent.DeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.search.SearchModule; -import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction.Request; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfigTests; -import org.junit.Before; import java.io.IOException; -import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomSourceConfig; -public class PreviewDataFrameTransformActionRequestTests extends AbstractSerializingTestCase { - - private NamedWriteableRegistry namedWriteableRegistry; - private NamedXContentRegistry namedXContentRegistry; - - @Before - public void registerAggregationNamedObjects() { - // register aggregations as NamedWriteable - SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); - namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); - namedXContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents()); - } - - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - return namedWriteableRegistry; - } - - @Override - protected NamedXContentRegistry xContentRegistry() { - return namedXContentRegistry; - } +public class PreviewDataFrameTransformActionRequestTests extends AbstractSerializingDataFrameTestCase { @Override protected Request doParseInstance(XContentParser parser) throws IOException { @@ -68,7 +40,8 @@ public class PreviewDataFrameTransformActionRequestTests extends AbstractSeriali @Override protected Request createTestInstance() { DataFrameTransformConfig config = new DataFrameTransformConfig("transform-preview", randomSourceConfig(), - new DestConfig("unused-transform-preview-index"), null, PivotConfigTests.randomPivotConfig(), null); + new DestConfig("unused-transform-preview-index"), + null, PivotConfigTests.randomPivotConfig(), null); return new Request(config); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java index 95d0d3f7e4c..94d4b2e20cd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java @@ -6,13 +6,8 @@ package org.elasticsearch.xpack.core.dataframe.action; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.SearchModule; -import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction.Request; import 
org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; @@ -20,33 +15,9 @@ import org.junit.Before; import java.io.IOException; -import static java.util.Collections.emptyList; - -public class PutDataFrameTransformActionRequestTests extends AbstractSerializingTestCase { - +public class PutDataFrameTransformActionRequestTests extends AbstractSerializingDataFrameTestCase { private String transformId; - private NamedWriteableRegistry namedWriteableRegistry; - private NamedXContentRegistry namedXContentRegistry; - - @Before - public void registerAggregationNamedObjects() throws Exception { - // register aggregations as NamedWriteable - SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); - namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); - namedXContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents()); - } - - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - return namedWriteableRegistry; - } - - @Override - protected NamedXContentRegistry xContentRegistry() { - return namedXContentRegistry; - } - @Before public void setupTransformId() { transformId = randomAlphaOfLengthBetween(1, 10); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java similarity index 98% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java index 786fafc2c07..ad4f068870b 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformCheckpointTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java @@ -4,14 +4,13 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.core.dataframe.transforms; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.transforms.AbstractSerializingDataFrameTestCase; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/plugin/data-frame/build.gradle b/x-pack/plugin/data-frame/build.gradle index ad4d846fd16..e065f72e998 100644 --- a/x-pack/plugin/data-frame/build.gradle +++ b/x-pack/plugin/data-frame/build.gradle @@ -15,10 +15,6 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } -run { - plugin xpackModule('core') -} - // xpack modules are installed in real clusters as the meta plugin, so // installing them as individual plugins for integ tests doesn't make sense, // so we disable integ tests diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java index 62101e4e120..d9927cd09ed 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java @@ -103,6 +103,14 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { stats = entityAsMap(client().performRequest(getRequest)); assertEquals(1, XContentMapValues.extractValue("count", stats)); + transformsStats = (List>)XContentMapValues.extractValue("transforms", stats); + assertEquals(1, transformsStats.size()); + Map state = (Map) XContentMapValues.extractValue("state", transformsStats.get(0)); + assertEquals(1, transformsStats.size()); + assertEquals("started", XContentMapValues.extractValue("task_state", state)); + assertEquals(null, XContentMapValues.extractValue("current_position", state)); + assertEquals(1, XContentMapValues.extractValue("checkpoint", state)); + // check all the different ways to retrieve all transforms getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT, authHeader); Map transforms = entityAsMap(client().performRequest(getRequest)); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 3a273cde27e..75e179e5dee 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -214,9 +214,9 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { config += " \"pivot\": {" + " \"group_by\": {" - + " \"by_day\": {" + + " \"by_hr\": {" + " \"date_histogram\": {" - + " \"interval\": \"1d\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-DD\"" + + " \"interval\": 
\"1h\",\"field\":\"timestamp\",\"format\":\"yyyy-MM-DD_HH\"" + " } } }," + " \"aggregations\": {" + " \"avg_rating\": {" @@ -232,10 +232,9 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); assertTrue(indexExists(dataFrameIndex)); - // we expect 21 documents as there shall be 21 days worth of docs Map indexStats = getAsMap(dataFrameIndex + "/_stats"); - assertEquals(21, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); - assertOnePivotValue(dataFrameIndex + "/_search?q=by_day:2017-01-15", 3.82); + assertEquals(104, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); + assertOnePivotValue(dataFrameIndex + "/_search?q=by_hr:1484499600000", 4.0833333333); } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index c0e6c97fd69..89047219f40 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -89,14 +89,20 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { // create index final StringBuilder bulk = new StringBuilder(); int day = 10; + int hour = 10; + int min = 10; for (int i = 0; i < numDocs; i++) { bulk.append("{\"index\":{\"_index\":\"" + REVIEWS_INDEX_NAME + "\"}}\n"); long user = Math.round(Math.pow(i * 31 % 1000, distributionTable[i % distributionTable.length]) % 27); int stars = distributionTable[(i * 33) % distributionTable.length]; long business = Math.round(Math.pow(user * stars, distributionTable[i % distributionTable.length]) % 13); - int hour = randomIntBetween(10, 20); - int min = randomIntBetween(30, 59); - int sec = randomIntBetween(30, 59); + if (i % 12 == 0) { + hour = 10 + (i % 13); + } + if (i % 5 == 0) { + min = 10 + (i % 49); + } + int sec = 10 + (i % 49); String date_string = "2017-01-" + day + "T" + hour + ":" + min + ":" + sec + "Z"; bulk.append("{\"user_id\":\"") diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodes.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodes.java new file mode 100644 index 00000000000..1b2c54b331f --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodes.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.action; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; + +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public final class DataFrameNodes { + + private DataFrameNodes() { + } + + /** + * Get the list of nodes the data frames are executing on + * + * @param dataFrameIds The data frames. + * @param clusterState State + * @return The executor nodes + */ + public static String[] dataFrameTaskNodes(List dataFrameIds, ClusterState clusterState) { + + Set executorNodes = new HashSet<>(); + + PersistentTasksCustomMetaData tasksMetaData = + PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(clusterState); + + if (tasksMetaData != null) { + Set dataFrameIdsSet = new HashSet<>(dataFrameIds); + + Collection> tasks = + tasksMetaData.findTasks(DataFrameField.TASK_NAME, t -> dataFrameIdsSet.contains(t.getId())); + + for (PersistentTasksCustomMetaData.PersistentTask task : tasks) { + executorNodes.add(task.getExecutorNode()); + } + } + + return executorNodes.toArray(new String[0]); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java index 93658c2f4f4..7ab5f280014 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; @@ -99,6 +100,8 @@ public class TransportGetDataFrameTransformsStatsAction extends @Override protected void taskOperation(Request request, DataFrameTransformTask task, ActionListener listener) { // Little extra insurance, make sure we only return transforms that aren't cancelled + ClusterState state = clusterService.state(); + String nodeId = state.nodes().getLocalNode().getId(); if (task.isCancelled() == false) { transformsCheckpointService.getCheckpointStats(task.getTransformId(), task.getCheckpoint(), task.getInProgressCheckpoint(), ActionListener.wrap(checkpointStats -> { @@ -109,7 +112,7 @@ public class TransportGetDataFrameTransformsStatsAction extends Collections.singletonList(new DataFrameTransformStateAndStats(task.getTransformId(), task.getState(), task.getStats(), DataFrameTransformCheckpointingInfo.EMPTY)), Collections.emptyList(), - Collections.singletonList(new FailedNodeException("", "Failed to retrieve checkpointing info", e)))); + Collections.singletonList(new FailedNodeException(nodeId, "Failed to retrieve checkpointing info", e)))); })); } else { listener.onResponse(new Response(Collections.emptyList())); @@ -121,6 +124,7 @@ public class TransportGetDataFrameTransformsStatsAction extends 
dataFrameTransformsConfigManager.expandTransformIds(request.getId(), request.getPageParams(), ActionListener.wrap( ids -> { request.setExpandedIds(ids); + request.setNodes(DataFrameNodes.dataFrameTaskNodes(ids, clusterService.state())); super.doExecute(task, request, ActionListener.wrap( response -> collectStatsForTransformsWithoutTasks(request, response, finalListener), finalListener::onFailure diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java index 5b361273050..f4b93cc6ac4 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java @@ -6,24 +6,31 @@ package org.elasticsearch.xpack.dataframe.action; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; import java.util.List; @@ -39,15 +46,20 @@ public class TransportPreviewDataFrameTransformAction extends private final XPackLicenseState licenseState; private final Client client; private final ThreadPool threadPool; + private final IndexNameExpressionResolver indexNameExpressionResolver; + private final ClusterService clusterService; @Inject public TransportPreviewDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, - Client client, ThreadPool threadPool, XPackLicenseState licenseState) { - super(PreviewDataFrameTransformAction.NAME,transportService, actionFilters, - (Writeable.Reader) PreviewDataFrameTransformAction.Request::new); + Client client, ThreadPool threadPool, XPackLicenseState licenseState, + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService) { + super(PreviewDataFrameTransformAction.NAME,transportService, actionFilters, 
PreviewDataFrameTransformAction.Request::new); this.licenseState = licenseState; this.client = client; this.threadPool = threadPool; + this.clusterService = clusterService; + this.indexNameExpressionResolver = indexNameExpressionResolver; } @Override @@ -59,26 +71,35 @@ public class TransportPreviewDataFrameTransformAction extends return; } + ClusterState clusterState = clusterService.state(); + final DataFrameTransformConfig config = request.getConfig(); + for(String src : config.getSource().getIndex()) { + String[] concreteNames = indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), src); + if (concreteNames.length == 0) { + listener.onFailure(new ElasticsearchStatusException( + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_SOURCE_INDEX_MISSING, src), + RestStatus.BAD_REQUEST)); + return; + } + } - Pivot pivot = new Pivot(config.getSource().getIndex(), - config.getSource().getQueryConfig().getQuery(), - config.getPivotConfig()); + Pivot pivot = new Pivot(config.getPivotConfig()); - getPreview(pivot, ActionListener.wrap( + getPreview(pivot, config.getSource(), ActionListener.wrap( previewResponse -> listener.onResponse(new PreviewDataFrameTransformAction.Response(previewResponse)), listener::onFailure )); } - private void getPreview(Pivot pivot, ActionListener>> listener) { - pivot.deduceMappings(client, ActionListener.wrap( + private void getPreview(Pivot pivot, SourceConfig source, ActionListener>> listener) { + pivot.deduceMappings(client, source, ActionListener.wrap( deducedMappings -> { ClientHelper.executeWithHeadersAsync(threadPool.getThreadContext().getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, SearchAction.INSTANCE, - pivot.buildSearchRequest(null, NUMBER_OF_PREVIEW_BUCKETS), + pivot.buildSearchRequest(source, null, NUMBER_OF_PREVIEW_BUCKETS), ActionListener.wrap( r -> { final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java index edcd0689290..0b8ef692cdd 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -51,7 +52,12 @@ import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigMa import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; public class TransportPutDataFrameTransformAction @@ -114,14 +120,54 @@ public class TransportPutDataFrameTransformAction 
DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_TRANSFORM_EXISTS, transformId))); return; } - + final String destIndex = config.getDestination().getIndex(); + Set concreteSourceIndexNames = new HashSet<>(); for(String src : config.getSource().getIndex()) { - if (indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), src).length == 0) { + String[] concreteNames = indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), src); + if (concreteNames.length == 0) { listener.onFailure(new ElasticsearchStatusException( DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_SOURCE_INDEX_MISSING, src), RestStatus.BAD_REQUEST)); return; } + if (Regex.simpleMatch(src, destIndex)) { + listener.onFailure(new ElasticsearchStatusException( + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_IN_SOURCE, destIndex, src), + RestStatus.BAD_REQUEST + )); + return; + } + concreteSourceIndexNames.addAll(Arrays.asList(concreteNames)); + } + + if (concreteSourceIndexNames.contains(destIndex)) { + listener.onFailure(new ElasticsearchStatusException( + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_IN_SOURCE, + destIndex, + Strings.arrayToCommaDelimitedString(config.getSource().getIndex())), + RestStatus.BAD_REQUEST + )); + return; + } + + final String[] concreteDest = + indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), destIndex); + + if (concreteDest.length > 1 || Regex.isSimpleMatchPattern(destIndex)) { + listener.onFailure(new ElasticsearchStatusException( + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_SINGLE_INDEX, destIndex), + RestStatus.BAD_REQUEST + )); + return; + } + if (concreteDest.length > 0 && concreteSourceIndexNames.contains(concreteDest[0])) { + listener.onFailure(new ElasticsearchStatusException( + DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_IN_SOURCE, + concreteDest[0], + Strings.arrayToCommaDelimitedString(concreteSourceIndexNames.toArray(new String[0]))), + RestStatus.BAD_REQUEST + )); + return; } // Early check to verify that the user can create the destination index and can read from the source @@ -131,18 +177,16 @@ public class TransportPutDataFrameTransformAction .indices(config.getSource().getIndex()) .privileges("read") .build(); - String[] destPrivileges = new String[3]; - destPrivileges[0] = "read"; - destPrivileges[1] = "index"; + List destPrivileges = new ArrayList<>(3); + destPrivileges.add("read"); + destPrivileges.add("index"); // If the destination index does not exist, we can assume that we may have to create it on start. // We should check that the creating user has the privileges to create the index. 
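
// Editor's note: the validation added above enforces three rules before a transform is
// stored: the destination must not match any source expression (wildcard-aware), must not be
// one of the resolved concrete source indices, and must resolve to at most one concrete
// index. A compact, self-contained restatement of the wildcard rule; this is a simplified
// reimplementation of the Regex.simpleMatch semantics used above, not the ES class itself.
static boolean destMatchesSource(String sourceExpression, String destIndex) {
    if (sourceExpression.contains("*") == false) {
        return sourceExpression.equals(destIndex);  // no wildcard: plain equality
    }
    // '*' matches any substring; every other character is literal
    String regex = java.util.regex.Pattern.quote(sourceExpression).replace("*", "\\E.*\\Q");
    return destIndex.matches(regex);
}
// e.g. destMatchesSource("reviews-*", "reviews-pivot") == true, so such a PUT is rejected
// with REST_PUT_DATA_FRAME_DEST_IN_SOURCE and RestStatus.BAD_REQUEST before any index is touched.
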
- if (indexNameExpressionResolver.concreteIndexNames(clusterState, - IndicesOptions.lenientExpandOpen(), - config.getDestination().getIndex()).length == 0) { - destPrivileges[2] = "create_index"; + if (concreteDest.length == 0) { + destPrivileges.add("create_index"); } RoleDescriptor.IndicesPrivileges destIndexPrivileges = RoleDescriptor.IndicesPrivileges.builder() - .indices(config.getDestination().getIndex()) + .indices(destIndex) .privileges(destPrivileges) .build(); @@ -151,7 +195,6 @@ public class TransportPutDataFrameTransformAction privRequest.username(username); privRequest.clusterPrivileges(Strings.EMPTY_ARRAY); privRequest.indexPrivileges(sourceIndexPrivileges, destIndexPrivileges); - ActionListener privResponseListener = ActionListener.wrap( r -> handlePrivsResponse(username, config, r, listener), listener::onFailure); @@ -190,9 +233,7 @@ public class TransportPutDataFrameTransformAction private void putDataFrame(DataFrameTransformConfig config, ActionListener listener) { - final Pivot pivot = new Pivot(config.getSource().getIndex(), - config.getSource().getQueryConfig().getQuery(), - config.getPivotConfig()); + final Pivot pivot = new Pivot(config.getPivotConfig()); // <5> Return the listener, or clean up destination index on failure. @@ -210,6 +251,6 @@ public class TransportPutDataFrameTransformAction ); // <1> Validate our pivot - pivot.validate(client, pivotValidationListener); + pivot.validate(client, config.getSource(), pivotValidationListener); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java index 98e70fa2578..39c0c74bbd5 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java @@ -224,9 +224,7 @@ public class TransportStartDataFrameTransformAction extends private void createDestinationIndex(final DataFrameTransformConfig config, final ActionListener listener) { - final Pivot pivot = new Pivot(config.getSource().getIndex(), - config.getSource().getQueryConfig().getQuery(), - config.getPivotConfig()); + final Pivot pivot = new Pivot(config.getPivotConfig()); ActionListener> deduceMappingsListener = ActionListener.wrap( mappings -> DataframeIndex.createDestinationIndex(client, @@ -238,7 +236,7 @@ public class TransportStartDataFrameTransformAction extends deduceTargetMappingsException)) ); - pivot.deduceMappings(client, deduceMappingsListener); + pivot.deduceMappings(client, config.getSource(), deduceMappingsListener); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java index 2092493caaf..120f1ef7759 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -13,29 +13,24 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import 
org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.action.util.PageParams; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; -import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; -import static org.elasticsearch.ExceptionsHelper.convertToElastic; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; public class TransportStopDataFrameTransformAction extends @@ -63,7 +58,7 @@ public class TransportStopDataFrameTransformAction extends dataFrameTransformsConfigManager.expandTransformIds(request.getId(), new PageParams(0, 10_000), ActionListener.wrap( expandedIds -> { request.setExpandedIds(new HashSet<>(expandedIds)); - request.setNodes(dataframeNodes(expandedIds, clusterService.state())); + request.setNodes(DataFrameNodes.dataFrameTaskNodes(expandedIds, clusterService.state())); super.doExecute(task, request, listener); }, listener::onFailure @@ -136,48 +131,12 @@ public class TransportStopDataFrameTransformAction extends List tasks, List taskOperationFailures, List failedNodeExceptions) { - if (taskOperationFailures.isEmpty() == false) { - throw convertToElastic(taskOperationFailures.get(0).getCause()); - } else if (failedNodeExceptions.isEmpty() == false) { - throw convertToElastic(failedNodeExceptions.get(0)); - } - - // Either the transform doesn't exist (the user didn't create it yet) or was deleted - // after the Stop API executed. 
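
// Editor's note: the block being removed here threw convertToElastic on the first task or
// node failure; the replacement just below instead returns a Response that carries the
// failures and lets the REST layer choose the status. This is the mapping the
// BaseTasksResponseToXContentListener introduced later in this diff applies (restated here
// for context):
@Override
protected RestStatus getStatus(T response) {
    if (response.getNodeFailures().size() > 0 || response.getTaskFailures().size() > 0) {
        return RestStatus.INTERNAL_SERVER_ERROR;  // failures surface as HTTP 500, details in the body
    }
    return RestStatus.OK;
}
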
- // In either case, let the user know - if (tasks.size() == 0) { - if (taskOperationFailures.isEmpty() == false) { - throw convertToElastic(taskOperationFailures.get(0).getCause()); - } else if (failedNodeExceptions.isEmpty() == false) { - throw convertToElastic(failedNodeExceptions.get(0)); - } else { - // This can happen we the actual task in the node no longer exists, or was never started - return new StopDataFrameTransformAction.Response(true); - } + if (taskOperationFailures.isEmpty() == false || failedNodeExceptions.isEmpty() == false) { + return new StopDataFrameTransformAction.Response(taskOperationFailures, failedNodeExceptions, false); } + // if tasks is empty allMatch is 'vacuously satisfied' boolean allStopped = tasks.stream().allMatch(StopDataFrameTransformAction.Response::isStopped); return new StopDataFrameTransformAction.Response(allStopped); } - - static String[] dataframeNodes(List dataFrameIds, ClusterState clusterState) { - - Set executorNodes = new HashSet<>(); - - PersistentTasksCustomMetaData tasksMetaData = - PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(clusterState); - - if (tasksMetaData != null) { - Set dataFrameIdsSet = new HashSet<>(dataFrameIds); - - Collection> tasks = - tasksMetaData.findTasks(DataFrameField.TASK_NAME, t -> dataFrameIdsSet.contains(t.getId())); - - for (PersistentTasksCustomMetaData.PersistentTask task : tasks) { - executorNodes.add(task.getExecutorNode()); - } - } - - return executorNodes.toArray(new String[0]); - } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java index 5008934128e..6fc2e334f92 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java @@ -9,7 +9,6 @@ package org.elasticsearch.xpack.dataframe.checkpoint; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.indices.get.GetIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; @@ -17,19 +16,17 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.client.Client; import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformCheckpoint; import java.util.Arrays; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.TreeMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; /** * DataFrameTransform 
Checkpoint Service @@ -41,17 +38,22 @@ import java.util.concurrent.TimeUnit; */ public class DataFrameTransformsCheckpointService { - private class Checkpoints { + private static class Checkpoints { DataFrameTransformCheckpoint currentCheckpoint = DataFrameTransformCheckpoint.EMPTY; DataFrameTransformCheckpoint inProgressCheckpoint = DataFrameTransformCheckpoint.EMPTY; DataFrameTransformCheckpoint sourceCheckpoint = DataFrameTransformCheckpoint.EMPTY; + + DataFrameTransformCheckpointingInfo buildInfo() { + return new DataFrameTransformCheckpointingInfo( + new DataFrameTransformCheckpointStats(currentCheckpoint.getTimestamp(), currentCheckpoint.getTimeUpperBound()), + new DataFrameTransformCheckpointStats(inProgressCheckpoint.getTimestamp(), inProgressCheckpoint.getTimeUpperBound()), + DataFrameTransformCheckpoint.getBehind(currentCheckpoint, sourceCheckpoint)); + } + } private static final Logger logger = LogManager.getLogger(DataFrameTransformsCheckpointService.class); - // timeout for retrieving checkpoint information - private static final int CHECKPOINT_STATS_TIMEOUT_SECONDS = 5; - private final Client client; private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; @@ -86,40 +88,49 @@ public class DataFrameTransformsCheckpointService { long timeUpperBound = 0; // 1st get index to see the indexes the user has access to - GetIndexRequest getIndexRequest = new GetIndexRequest().indices(transformConfig.getSource().getIndex()); + GetIndexRequest getIndexRequest = new GetIndexRequest() + .indices(transformConfig.getSource().getIndex()) + .features(new GetIndexRequest.Feature[0]); ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, GetIndexAction.INSTANCE, getIndexRequest, ActionListener.wrap(getIndexResponse -> { Set userIndices = new HashSet<>(Arrays.asList(getIndexResponse.getIndices())); - // 2nd get stats request - ClientHelper.executeAsyncWithOrigin(client, ClientHelper.DATA_FRAME_ORIGIN, IndicesStatsAction.INSTANCE, - new IndicesStatsRequest().indices(transformConfig.getSource().getIndex()), ActionListener.wrap(response -> { + ClientHelper.executeAsyncWithOrigin(client, + ClientHelper.DATA_FRAME_ORIGIN, + IndicesStatsAction.INSTANCE, + new IndicesStatsRequest() + .indices(transformConfig.getSource().getIndex()) + .clear(), + ActionListener.wrap( + response -> { if (response.getFailedShards() != 0) { - throw new CheckpointException("Source has [" + response.getFailedShards() + "] failed shards"); + listener.onFailure( + new CheckpointException("Source has [" + response.getFailedShards() + "] failed shards")); + return; } - - Map checkpointsByIndex = extractIndexCheckPoints(response.getShards(), userIndices); - DataFrameTransformCheckpoint checkpointDoc = new DataFrameTransformCheckpoint(transformConfig.getId(), - timestamp, checkpoint, checkpointsByIndex, timeUpperBound); - - listener.onResponse(checkpointDoc); - - }, IndicesStatsRequestException -> { - throw new CheckpointException("Failed to retrieve indices stats", IndicesStatsRequestException); - })); - - }, getIndexException -> { - throw new CheckpointException("Failed to retrieve list of indices", getIndexException); - })); + try { + Map checkpointsByIndex = extractIndexCheckPoints(response.getShards(), userIndices); + listener.onResponse(new DataFrameTransformCheckpoint(transformConfig.getId(), + timestamp, + checkpoint, + checkpointsByIndex, + timeUpperBound)); + } catch (CheckpointException checkpointException) { + 
listener.onFailure(checkpointException); + } + }, + listener::onFailure + )); + }, + listener::onFailure + )); } /** * Get checkpointing stats for a data frame * - * Implementation details: - * - fires up to 3 requests _in parallel_ rather than cascading them * * @param transformId The data frame task * @param currentCheckpoint the current checkpoint @@ -132,71 +143,66 @@ public class DataFrameTransformsCheckpointService { long inProgressCheckpoint, ActionListener listener) { - // process in parallel: current checkpoint, in-progress checkpoint, current state of the source - CountDownLatch latch = new CountDownLatch(3); - - // ensure listener is called exactly once - final ActionListener wrappedListener = ActionListener.notifyOnce(listener); - - // holder structure for writing the results of the 3 parallel tasks Checkpoints checkpoints = new Checkpoints(); - // get the current checkpoint - if (currentCheckpoint != 0) { - dataFrameTransformsConfigManager.getTransformCheckpoint(transformId, currentCheckpoint, - new LatchedActionListener<>(ActionListener.wrap(checkpoint -> checkpoints.currentCheckpoint = checkpoint, e -> { - logger.debug("Failed to retrieve checkpoint [" + currentCheckpoint + "] for data frame []" + transformId, e); - wrappedListener - .onFailure(new CheckpointException("Failed to retrieve current checkpoint [" + currentCheckpoint + "]", e)); - }), latch)); - } else { - latch.countDown(); - } - - // get the in-progress checkpoint - if (inProgressCheckpoint != 0) { - dataFrameTransformsConfigManager.getTransformCheckpoint(transformId, inProgressCheckpoint, - new LatchedActionListener<>(ActionListener.wrap(checkpoint -> checkpoints.inProgressCheckpoint = checkpoint, e -> { - logger.debug("Failed to retrieve in progress checkpoint [" + inProgressCheckpoint + "] for data frame [" - + transformId + "]", e); - wrappedListener.onFailure( - new CheckpointException("Failed to retrieve in progress checkpoint [" + inProgressCheckpoint + "]", e)); - }), latch)); - } else { - latch.countDown(); - } - - // get the current state - dataFrameTransformsConfigManager.getTransformConfiguration(transformId, ActionListener.wrap(transformConfig -> { - getCheckpoint(transformConfig, - new LatchedActionListener<>(ActionListener.wrap(checkpoint -> checkpoints.sourceCheckpoint = checkpoint, e2 -> { - logger.debug("Failed to retrieve actual checkpoint for data frame [" + transformId + "]", e2); - wrappedListener.onFailure(new CheckpointException("Failed to retrieve actual checkpoint", e2)); - }), latch)); - }, e -> { - logger.warn("Failed to retrieve configuration for data frame [" + transformId + "]", e); - wrappedListener.onFailure(new CheckpointException("Failed to retrieve configuration", e)); - latch.countDown(); - })); - - try { - if (latch.await(CHECKPOINT_STATS_TIMEOUT_SECONDS, TimeUnit.SECONDS)) { - logger.debug("Retrieval of checkpoint information succeeded for data frame [" + transformId + "]"); - wrappedListener.onResponse(new DataFrameTransformCheckpointingInfo( - new DataFrameTransformCheckpointStats(checkpoints.currentCheckpoint.getTimestamp(), - checkpoints.currentCheckpoint.getTimeUpperBound()), - new DataFrameTransformCheckpointStats(checkpoints.inProgressCheckpoint.getTimestamp(), - checkpoints.inProgressCheckpoint.getTimeUpperBound()), - DataFrameTransformCheckpoint.getBehind(checkpoints.currentCheckpoint, checkpoints.sourceCheckpoint))); - } else { - // timed out - logger.debug("Retrieval of checkpoint information has timed out for data frame [" + transformId + "]"); - 
wrappedListener.onFailure(new CheckpointException("Retrieval of checkpoint information has timed out")); + // <3> notify the user once we have the current checkpoint + ActionListener currentCheckpointListener = ActionListener.wrap( + currentCheckpointObj -> { + checkpoints.currentCheckpoint = currentCheckpointObj; + listener.onResponse(checkpoints.buildInfo()); + }, + e -> { + logger.debug("Failed to retrieve current checkpoint [" + + currentCheckpoint + "] for data frame [" + transformId + "]", e); + listener.onFailure(new CheckpointException("Failure during current checkpoint info retrieval", e)); } - } catch (InterruptedException e) { - logger.debug("Failed to retrieve checkpoints for data frame [" + transformId + "]", e); - wrappedListener.onFailure(new CheckpointException("Failure during checkpoint info retrieval", e)); - } + ); + + // <2> after the in progress checkpoint, get the current checkpoint + ActionListener inProgressCheckpointListener = ActionListener.wrap( + inProgressCheckpointObj -> { + checkpoints.inProgressCheckpoint = inProgressCheckpointObj; + if (currentCheckpoint != 0) { + dataFrameTransformsConfigManager.getTransformCheckpoint(transformId, + currentCheckpoint, + currentCheckpointListener); + } else { + currentCheckpointListener.onResponse(DataFrameTransformCheckpoint.EMPTY); + } + }, + e -> { + logger.debug("Failed to retrieve in progress checkpoint [" + + inProgressCheckpoint + "] for data frame [" + transformId + "]", e); + listener.onFailure(new CheckpointException("Failure during in progress checkpoint info retrieval", e)); + } + ); + + // <1> after the source checkpoint, get the in progress checkpoint + ActionListener sourceCheckpointListener = ActionListener.wrap( + sourceCheckpoint -> { + checkpoints.sourceCheckpoint = sourceCheckpoint; + if (inProgressCheckpoint != 0) { + dataFrameTransformsConfigManager.getTransformCheckpoint(transformId, + inProgressCheckpoint, + inProgressCheckpointListener); + } else { + inProgressCheckpointListener.onResponse(DataFrameTransformCheckpoint.EMPTY); + } + }, + e -> { + logger.debug("Failed to retrieve source checkpoint for data frame [" + transformId + "]", e); + listener.onFailure(new CheckpointException("Failure during source checkpoint info retrieval", e)); + } + ); + + // <0> get the transform and the source, transient checkpoint + dataFrameTransformsConfigManager.getTransformConfiguration(transformId, ActionListener.wrap( + transformConfig -> getCheckpoint(transformConfig, sourceCheckpointListener), + transformError -> { + logger.warn("Failed to retrieve configuration for data frame [" + transformId + "]", transformError); + listener.onFailure(new CheckpointException("Failed to retrieve configuration", transformError)); + }) + ); } static Map extractIndexCheckPoints(ShardStats[] shards, Set userIndices) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java index f8b8bc6dc83..e8c1e012b7b 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java @@ -45,8 +45,8 @@ import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import 
org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformCheckpoint; import java.io.IOException; import java.io.InputStream; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java index a61deeae18d..f97af415028 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java @@ -18,6 +18,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.DateHistogramGroupSource; +import org.elasticsearch.xpack.core.dataframe.transforms.pivot.SingleGroupSource; import java.io.IOException; import java.util.Map; @@ -31,7 +33,9 @@ public final class DataframeIndex { public static final String DOC_TYPE = "_doc"; private static final String PROPERTIES = "properties"; private static final String TYPE = "type"; + private static final String FORMAT = "format"; private static final String META = "_meta"; + private static final String DEFAULT_TIME_FORMAT = "strict_date_optional_time||epoch_millis"; private DataframeIndex() { } @@ -44,7 +48,9 @@ public final class DataframeIndex { request.settings(Settings.builder() // <1> .put("index.number_of_shards", 1).put("index.number_of_replicas", 0)); - request.mapping(DOC_TYPE, createMappingXContent(mappings, transformConfig.getId())); + request.mapping(DOC_TYPE, createMappingXContent(mappings, + transformConfig.getPivotConfig().getGroupConfig().getGroups(), + transformConfig.getId())); client.execute(CreateIndexAction.INSTANCE, request, ActionListener.wrap(createIndexResponse -> { listener.onResponse(true); @@ -56,14 +62,29 @@ public final class DataframeIndex { })); } - private static XContentBuilder createMappingXContent(Map mappings, String id) { + private static XContentBuilder createMappingXContent(Map mappings, + Map groupSources, + String id) { try { XContentBuilder builder = jsonBuilder().startObject(); builder.startObject(DOC_TYPE); addMetaData(builder, id); builder.startObject(PROPERTIES); for (Entry field : mappings.entrySet()) { - builder.startObject(field.getKey()).field(TYPE, field.getValue()).endObject(); + String fieldName = field.getKey(); + String fieldType = field.getValue(); + + builder.startObject(fieldName); + builder.field(TYPE, fieldType); + + SingleGroupSource groupSource = groupSources.get(fieldName); + if (groupSource instanceof DateHistogramGroupSource) { + String format = ((DateHistogramGroupSource) groupSource).getFormat(); + if (format != null) { + builder.field(FORMAT, DEFAULT_TIME_FORMAT + "||" + format); + } + } + builder.endObject(); } builder.endObject(); // properties builder.endObject(); // doc_type diff --git 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/BaseTasksResponseToXContentListener.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/BaseTasksResponseToXContentListener.java new file mode 100644 index 00000000000..def26a52efb --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/BaseTasksResponseToXContentListener.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.rest.action; + +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; + + +class BaseTasksResponseToXContentListener extends RestToXContentListener { + + BaseTasksResponseToXContentListener(RestChannel channel) { + super(channel); + } + + @Override + protected RestStatus getStatus(T response) { + if (response.getNodeFailures().size() > 0 || response.getTaskFailures().size() > 0) { + return RestStatus.INTERNAL_SERVER_ERROR; + } + return RestStatus.OK; + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java index 0efa3ffa2c5..183952e0603 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java @@ -11,8 +11,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; @@ -35,15 +33,7 @@ public class RestDeleteDataFrameTransformAction extends BaseRestHandler { DeleteDataFrameTransformAction.Request request = new DeleteDataFrameTransformAction.Request(id); return channel -> client.execute(DeleteDataFrameTransformAction.INSTANCE, request, - new RestToXContentListener(channel) { - @Override - protected RestStatus getStatus(DeleteDataFrameTransformAction.Response response) { - if (response.getNodeFailures().size() > 0 || response.getTaskFailures().size() > 0) { - return RestStatus.INTERNAL_SERVER_ERROR; - } - return RestStatus.OK; - } - }); + new BaseTasksResponseToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java index 87cc13edbc3..f2d14f81069 100644 --- 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; @@ -33,7 +32,8 @@ public class RestGetDataFrameTransformsStatsAction extends BaseRestHandler { new PageParams(restRequest.paramAsInt(PageParams.FROM.getPreferredName(), PageParams.DEFAULT_FROM), restRequest.paramAsInt(PageParams.SIZE.getPreferredName(), PageParams.DEFAULT_SIZE))); } - return channel -> client.execute(GetDataFrameTransformsStatsAction.INSTANCE, request, new RestToXContentListener<>(channel)); + return channel -> client.execute(GetDataFrameTransformsStatsAction.INSTANCE, request, + new BaseTasksResponseToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java index 1d9b3f29a61..764aeca4a64 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction; @@ -31,7 +30,8 @@ public class RestStartDataFrameTransformAction extends BaseRestHandler { boolean force = restRequest.paramAsBoolean(DataFrameField.FORCE.getPreferredName(), false); StartDataFrameTransformAction.Request request = new StartDataFrameTransformAction.Request(id, force); request.timeout(restRequest.paramAsTime(DataFrameField.TIMEOUT.getPreferredName(), AcknowledgedRequest.DEFAULT_ACK_TIMEOUT)); - return channel -> client.execute(StartDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); + return channel -> client.execute(StartDataFrameTransformAction.INSTANCE, request, + new BaseTasksResponseToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java index e93898b905b..d34478b9ba9 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java @@ -11,7 +11,6 @@ import 
org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; @@ -34,7 +33,8 @@ public class RestStopDataFrameTransformAction extends BaseRestHandler { StopDataFrameTransformAction.Request request = new StopDataFrameTransformAction.Request(id, waitForCompletion, force, timeout); - return channel -> client.execute(StopDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); + return channel -> client.execute(StopDataFrameTransformAction.INSTANCE, request, + new BaseTasksResponseToXContentListener<>(channel)); } @Override
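Before the DataFrameIndexer diff that follows, a word on the paging model it relies on: the pivot is driven by a composite aggregation, which is paged via its after_key, and a page with no buckets also carries no after_key. That is why the new early-exit in doProcess() further down can mark the iteration as done without building an empty bulk request. The control flow, rendered as a simplified synchronous sketch (the real indexer is asynchronous via AsyncTwoPhaseIndexer; buildPageRequest and indexPage are hypothetical stand-ins):

    import java.util.Map;

    import org.elasticsearch.action.search.SearchRequest;
    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation;

    abstract class CompositePagingSketch {
        // stand-in for Pivot.buildSearchRequest(sourceConfig, position, pageSize)
        abstract SearchRequest buildPageRequest(Map<String, Object> afterKey);

        // stand-in for processBucketsToIndexRequests(...) plus the bulk phase
        abstract void indexPage(CompositeAggregation page);

        void run(Client client) {
            Map<String, Object> afterKey = null;
            do {
                SearchResponse response = client.search(buildPageRequest(afterKey)).actionGet();
                CompositeAggregation page = response.getAggregations().get("_data_frame");
                if (page.getBuckets().isEmpty()) {
                    return; // no buckets implies no after_key: the end was reached
                }
                indexPage(page);
                afterKey = page.afterKey();
            } while (afterKey != null);
        }
    }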
diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java index 5fde9a1cac6..f2fc71da7f0 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java @@ -8,23 +8,21 @@ package org.elasticsearch.xpack.dataframe.transforms; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.Nullable; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; +import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.indexing.IterationResult; @@ -33,6 +31,7 @@ import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; import java.io.IOException; import java.io.UncheckedIOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; import java.util.concurrent.Executor; @@ -50,31 +49,46 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer<Map<String, Object>, DataFrameIndexerTransformStats> { protected final DataFrameAuditor auditor; + private final DataFrameTransformConfig transformConfig; + private volatile DataFrameTransformProgress progress; + private final Map<String, String> fieldMappings; + private Pivot pivot; private int pageSize = 0; public DataFrameIndexer(Executor executor, DataFrameAuditor auditor, + DataFrameTransformConfig transformConfig, + Map<String, String> fieldMappings, AtomicReference<IndexerState> initialState, Map<String, Object> initialPosition, - DataFrameIndexerTransformStats jobStats) { + DataFrameIndexerTransformStats jobStats, + DataFrameTransformProgress transformProgress) { super(executor, initialState, initialPosition, jobStats); this.auditor = Objects.requireNonNull(auditor); + this.transformConfig = ExceptionsHelper.requireNonNull(transformConfig, "transformConfig"); + this.fieldMappings = ExceptionsHelper.requireNonNull(fieldMappings, "fieldMappings"); + this.progress = transformProgress; } - protected abstract DataFrameTransformConfig getConfig(); - - protected abstract Map<String, String> getFieldMappings(); - - @Nullable - protected abstract DataFrameTransformProgress getProgress(); - protected abstract void failIndexer(String message); public int getPageSize() { return pageSize; } + public DataFrameTransformConfig getConfig() { + return transformConfig; + } + + public Map<String, String> getFieldMappings() { + return fieldMappings; + } + + public DataFrameTransformProgress getProgress() { + return progress; + } + /** * Request a checkpoint */ @@ -83,8 +97,7 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer<Map<String, Object>, DataFrameIndexerTransformStats> { @Override protected void onStart(long now, ActionListener<Void> listener) { try { - QueryBuilder queryBuilder = getConfig().getSource().getQueryConfig().getQuery(); - pivot = new Pivot(getConfig().getSource().getIndex(), queryBuilder, getConfig().getPivotConfig()); + pivot = new Pivot(getConfig().getPivotConfig()); // if we haven't set the page size yet, if it is set we might have reduced it after running into an out of memory if (pageSize == 0) { @@ -115,12 +128,18 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer<Map<String, Object>, DataFrameIndexerTransformStats> { @Override protected IterationResult<Map<String, Object>> doProcess(SearchResponse searchResponse) { final CompositeAggregation agg = searchResponse.getAggregations().get(COMPOSITE_AGGREGATION_NAME); + + // we reached the end + if (agg.getBuckets().isEmpty()) { + return new IterationResult<>(Collections.emptyList(), null, true); + } + long docsBeforeProcess = getStats().getNumDocuments(); IterationResult<Map<String, Object>> result = new IterationResult<>(processBucketsToIndexRequests(agg).collect(Collectors.toList()), agg.afterKey(), agg.getBuckets().isEmpty()); - if (getProgress() != null) { - getProgress().docsProcessed(getStats().getNumDocuments() - docsBeforeProcess); + if (progress != null) { + progress.docsProcessed(getStats().getNumDocuments() - docsBeforeProcess); } return result; } @@ -166,7 +185,7 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer<Map<String, Object>, DataFrameIndexerTransformStats> - private final Map<String, String> fieldMappings; - private final DataFrameTransformConfig transformConfig; - private volatile DataFrameTransformProgress progress; private volatile DataFrameIndexerTransformStats previouslyPersistedStats = null; private final AtomicInteger failureCount; // Keeps track of the last exception that was written to our audit, keeps us from spamming the audit index @@ -470,19 +466,18 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S .threadPool .executor(ThreadPool.Names.GENERIC), ExceptionsHelper.requireNonNull(auditor, "auditor"), + transformConfig, + fieldMappings, ExceptionsHelper.requireNonNull(initialState, "initialState"), initialPosition, - initialStats == null ? new DataFrameIndexerTransformStats(transformId) : initialStats); + initialStats == null ?
new DataFrameIndexerTransformStats(transformId) : initialStats, + transformProgress); this.transformId = ExceptionsHelper.requireNonNull(transformId, "transformId"); this.transformsConfigManager = ExceptionsHelper.requireNonNull(transformsConfigManager, "transformsConfigManager"); this.transformsCheckpointService = ExceptionsHelper.requireNonNull(transformsCheckpointService, "transformsCheckpointService"); this.client = ExceptionsHelper.requireNonNull(client, "client"); - this.auditor = auditor; - this.transformConfig = ExceptionsHelper.requireNonNull(transformConfig, "transformConfig"); this.transformTask = parentTask; - this.fieldMappings = ExceptionsHelper.requireNonNull(fieldMappings, "fieldMappings"); - this.progress = transformProgress; this.failureCount = new AtomicInteger(0); } @@ -510,21 +505,6 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } } - @Override - protected DataFrameTransformConfig getConfig() { - return transformConfig; - } - - @Override - protected Map getFieldMappings() { - return fieldMappings; - } - - @Override - protected DataFrameTransformProgress getProgress() { - return progress; - } - @Override protected String getJobId() { return transformId; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java index d9223fe90dd..6d6f8455851 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java @@ -21,6 +21,7 @@ import java.util.TreeMap; */ public final class IDGenerator { private static final byte[] NULL_VALUE = "__NULL_VALUE__".getBytes(StandardCharsets.UTF_8); + private static final byte[] EMPTY_VALUE = "__EMPTY_VALUE__".getBytes(StandardCharsets.UTF_8); private static final byte DELIM = '$'; private static final long SEED = 19; private static final int MAX_FIRST_BYTES = 5; @@ -57,7 +58,9 @@ public final class IDGenerator { for (Object value : objectsForIDGeneration.values()) { byte[] v = getBytes(value); - + if (v.length == 0) { + v = EMPTY_VALUE; + } buffer.append(v, 0, v.length); buffer.append(DELIM); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java index 9a8bf3f234d..0e5231442d1 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java @@ -25,6 +25,7 @@ import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregati import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; @@ -37,24 +38,21 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; public class Pivot { public static final int DEFAULT_INITIAL_PAGE_SIZE = 500; + public static final int TEST_QUERY_PAGE_SIZE = 
50; private static final String COMPOSITE_AGGREGATION_NAME = "_data_frame"; private final PivotConfig config; - private final String[] source; // objects for re-using private final CompositeAggregationBuilder cachedCompositeAggregation; - private final SearchRequest cachedSearchRequest; - public Pivot(String[] source, QueryBuilder query, PivotConfig config) { - this.source = source; + public Pivot(PivotConfig config) { this.config = config; this.cachedCompositeAggregation = createCompositeAggregation(config); - this.cachedSearchRequest = createSearchRequest(source, query, cachedCompositeAggregation); } - public void validate(Client client, final ActionListener listener) { + public void validate(Client client, SourceConfig sourceConfig, final ActionListener listener) { // step 1: check if used aggregations are supported for (AggregationBuilder agg : config.getAggregationConfig().getAggregatorFactories()) { if (Aggregations.isSupportedByDataframe(agg.getType()) == false) { @@ -64,11 +62,11 @@ public class Pivot { } // step 2: run a query to validate that config is valid - runTestQuery(client, listener); + runTestQuery(client, sourceConfig, listener); } - public void deduceMappings(Client client, final ActionListener> listener) { - SchemaUtil.deduceMappings(client, config, source, listener); + public void deduceMappings(Client client, SourceConfig sourceConfig, final ActionListener> listener) { + SchemaUtil.deduceMappings(client, config, sourceConfig.getIndex(), listener); } /** @@ -87,14 +85,24 @@ public class Pivot { return DEFAULT_INITIAL_PAGE_SIZE; } - public SearchRequest buildSearchRequest(Map position, int pageSize) { - if (position != null) { - cachedCompositeAggregation.aggregateAfter(position); - } + public SearchRequest buildSearchRequest(SourceConfig sourceConfig, Map position, int pageSize) { + QueryBuilder queryBuilder = sourceConfig.getQueryConfig().getQuery(); + SearchRequest searchRequest = new SearchRequest(sourceConfig.getIndex()); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.aggregation(buildAggregation(position, pageSize)); + sourceBuilder.size(0); + sourceBuilder.query(queryBuilder); + searchRequest.source(sourceBuilder); + return searchRequest; + + } + + public AggregationBuilder buildAggregation(Map position, int pageSize) { + cachedCompositeAggregation.aggregateAfter(position); cachedCompositeAggregation.size(pageSize); - return cachedSearchRequest; + return cachedCompositeAggregation; } public Stream> extractResults(CompositeAggregation agg, @@ -113,10 +121,10 @@ public class Pivot { dataFrameIndexerTransformStats); } - private void runTestQuery(Client client, final ActionListener listener) { - // no after key - cachedCompositeAggregation.aggregateAfter(null); - client.execute(SearchAction.INSTANCE, cachedSearchRequest, ActionListener.wrap(response -> { + private void runTestQuery(Client client, SourceConfig sourceConfig, final ActionListener listener) { + SearchRequest searchRequest = buildSearchRequest(sourceConfig, null, TEST_QUERY_PAGE_SIZE); + + client.execute(SearchAction.INSTANCE, searchRequest, ActionListener.wrap(response -> { if (response == null) { listener.onFailure(new RuntimeException("Unexpected null response from test query")); return; @@ -131,16 +139,6 @@ public class Pivot { })); } - private static SearchRequest createSearchRequest(String[] index, QueryBuilder query, CompositeAggregationBuilder compositeAggregation) { - SearchRequest searchRequest = new SearchRequest(index); - SearchSourceBuilder sourceBuilder = 
new SearchSourceBuilder(); - sourceBuilder.aggregation(compositeAggregation); - sourceBuilder.size(0); - sourceBuilder.query(query); - searchRequest.source(sourceBuilder); - return searchRequest; - } - private static CompositeAggregationBuilder createCompositeAggregation(PivotConfig config) { CompositeAggregationBuilder compositeAggregation; diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java similarity index 85% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java rename to x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java index ddc7ddd4f1b..ba549aa7e8b 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java @@ -18,13 +18,12 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; -import java.io.IOException; import java.util.Arrays; import java.util.Collections; import static org.hamcrest.Matchers.hasItemInArray; -public class TransportStopDataFrameTransformActionTests extends ESTestCase { +public class DataFrameNodesTests extends ESTestCase { public void testDataframeNodes() { String dataFrameIdFoo = "df-id-foo"; @@ -49,12 +48,12 @@ public class TransportStopDataFrameTransformActionTests extends ESTestCase { } @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeTo(StreamOutput out) { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params) { return null; } }, @@ -64,7 +63,7 @@ public class TransportStopDataFrameTransformActionTests extends ESTestCase { .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) .build(); - String[] nodes = TransportStopDataFrameTransformAction.dataframeNodes(Arrays.asList(dataFrameIdFoo, dataFrameIdBar), cs); + String[] nodes = DataFrameNodes.dataFrameTaskNodes(Arrays.asList(dataFrameIdFoo, dataFrameIdBar), cs); assertEquals(2, nodes.length); assertThat(nodes, hasItemInArray("node-1")); assertThat(nodes, hasItemInArray("node-2")); @@ -72,7 +71,7 @@ public class TransportStopDataFrameTransformActionTests extends ESTestCase { public void testDataframeNodes_NoTasks() { ClusterState emptyState = ClusterState.builder(new ClusterName("_name")).build(); - String[] nodes = TransportStopDataFrameTransformAction.dataframeNodes(Collections.singletonList("df-id"), emptyState); + String[] nodes = DataFrameNodes.dataFrameTaskNodes(Collections.singletonList("df-id"), emptyState); assertEquals(0, nodes.length); } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java index 4bcfef68694..f9c2d4fd95b 100644 --- 
a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java @@ -39,12 +39,12 @@ import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.search.suggest.completion.CompletionStats; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; import org.elasticsearch.xpack.dataframe.DataFrameSingleNodeTestCase; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformCheckpoint; import org.junit.After; import org.junit.Before; diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java index 8c59cf00be0..36ae4f3f162 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java @@ -10,11 +10,11 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointTests; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; import org.elasticsearch.xpack.dataframe.DataFrameSingleNodeTestCase; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformCheckpointTests; import org.junit.Before; import java.util.Arrays; diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java index 015eb4b65e3..f3f3255f07a 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; -import 
org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; @@ -32,7 +31,6 @@ import org.junit.Before; import java.util.Collections; import java.util.Map; -import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; @@ -48,13 +46,9 @@ import static org.mockito.Mockito.when; public class DataFrameIndexerTests extends ESTestCase { private Client client; - private static final String TEST_ORIGIN = "test_origin"; - private static final String TEST_INDEX = "test_index"; class MockedDataFrameIndexer extends DataFrameIndexer { - private final DataFrameTransformConfig transformConfig; - private final Map fieldMappings; private final Function searchFunction; private final Function bulkFunction; private final Consumer failureConsumer; @@ -73,9 +67,8 @@ public class DataFrameIndexerTests extends ESTestCase { Function searchFunction, Function bulkFunction, Consumer failureConsumer) { - super(executor, auditor, initialState, initialPosition, jobStats); - this.transformConfig = Objects.requireNonNull(transformConfig); - this.fieldMappings = Objects.requireNonNull(fieldMappings); + super(executor, auditor, transformConfig, fieldMappings, initialState, initialPosition, jobStats, + /* DataFrameTransformProgress */ null); this.searchFunction = searchFunction; this.bulkFunction = bulkFunction; this.failureConsumer = failureConsumer; @@ -85,21 +78,6 @@ public class DataFrameIndexerTests extends ESTestCase { return latch = new CountDownLatch(count); } - @Override - protected DataFrameTransformConfig getConfig() { - return transformConfig; - } - - @Override - protected Map getFieldMappings() { - return fieldMappings; - } - - @Override - protected DataFrameTransformProgress getProgress() { - return null; - } - @Override protected void createCheckpoint(ActionListener listener) { listener.onResponse(null); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java index fd378a2c4c1..3ce5dd81558 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java @@ -27,6 +27,9 @@ public class IDGeneratorTests extends ESTestCase { assertNotEquals(id, idGen.getID()); idGen.add("key6", 13); assertNotEquals(id, idGen.getID()); + id = idGen.getID(); + idGen.add("key7", ""); + assertNotEquals(id, idGen.getID()); } public void testOrderIndependence() { diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java index d4607d7adc3..172868833f3 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java @@ -22,12 +22,13 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.xpack.core.dataframe.transforms.QueryConfig; +import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.AggregationConfig; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfigTests; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; @@ -83,42 +84,46 @@ public class PivotTests extends ESTestCase { } public void testValidateExistingIndex() throws Exception { - Pivot pivot = new Pivot(new String[]{"existing_source_index"}, new MatchAllQueryBuilder(), getValidPivotConfig()); + SourceConfig source = new SourceConfig(new String[]{"existing_source_index"}, QueryConfig.matchAll()); + Pivot pivot = new Pivot(getValidPivotConfig()); - assertValidTransform(client, pivot); + assertValidTransform(client, source, pivot); } public void testValidateNonExistingIndex() throws Exception { - Pivot pivot = new Pivot(new String[]{"non_existing_source_index"}, new MatchAllQueryBuilder(), getValidPivotConfig()); + SourceConfig source = new SourceConfig(new String[]{"non_existing_source_index"}, QueryConfig.matchAll()); + Pivot pivot = new Pivot(getValidPivotConfig()); - assertInvalidTransform(client, pivot); + assertInvalidTransform(client, source, pivot); } public void testSearchFailure() throws Exception { // test a failure during the search operation, transform creation fails if // search has failures although they might just be temporary - Pivot pivot = new Pivot(new String[]{"existing_source_index_with_failing_shards"}, - new MatchAllQueryBuilder(), - getValidPivotConfig()); + SourceConfig source = new SourceConfig(new String[] { "existing_source_index_with_failing_shards" }, QueryConfig.matchAll()); - assertInvalidTransform(client, pivot); + Pivot pivot = new Pivot(getValidPivotConfig()); + + assertInvalidTransform(client, source, pivot); } public void testValidateAllSupportedAggregations() throws Exception { for (String agg : supportedAggregations) { AggregationConfig aggregationConfig = getAggregationConfig(agg); + SourceConfig source = new SourceConfig(new String[]{"existing_source"}, QueryConfig.matchAll()); - Pivot pivot = new Pivot(new String[]{"existing_source"}, new MatchAllQueryBuilder(), getValidPivotConfig(aggregationConfig)); - assertValidTransform(client, pivot); + Pivot pivot = new Pivot(getValidPivotConfig(aggregationConfig)); + assertValidTransform(client, source, pivot); } } public void testValidateAllUnsupportedAggregations() throws Exception { for (String agg : unsupportedAggregations) { AggregationConfig aggregationConfig = getAggregationConfig(agg); + SourceConfig source = new SourceConfig(new String[]{"existing_source"}, QueryConfig.matchAll()); - Pivot pivot = new Pivot(new String[]{"existing_source"}, new MatchAllQueryBuilder(), getValidPivotConfig(aggregationConfig)); - assertInvalidTransform(client, pivot); + Pivot pivot = new Pivot(getValidPivotConfig(aggregationConfig)); + assertInvalidTransform(client, source, pivot); } } @@ -202,18 +207,18 @@ public class PivotTests extends ESTestCase { return AggregationConfig.fromXContent(parser, false); } - private 
static void assertValidTransform(Client client, Pivot pivot) throws Exception { - validate(client, pivot, true); + private static void assertValidTransform(Client client, SourceConfig source, Pivot pivot) throws Exception { + validate(client, source, pivot, true); } - private static void assertInvalidTransform(Client client, Pivot pivot) throws Exception { - validate(client, pivot, false); + private static void assertInvalidTransform(Client client, SourceConfig source, Pivot pivot) throws Exception { + validate(client, source, pivot, false); } - private static void validate(Client client, Pivot pivot, boolean expectValid) throws Exception { + private static void validate(Client client, SourceConfig source, Pivot pivot, boolean expectValid) throws Exception { CountDownLatch latch = new CountDownLatch(1); final AtomicReference exceptionHolder = new AtomicReference<>(); - pivot.validate(client, ActionListener.wrap(validity -> { + pivot.validate(client, source, ActionListener.wrap(validity -> { assertEquals(expectValid, validity); latch.countDown(); }, e -> { diff --git a/x-pack/plugin/deprecation/build.gradle b/x-pack/plugin/deprecation/build.gradle index d89eb62e884..62d2a891929 100644 --- a/x-pack/plugin/deprecation/build.gradle +++ b/x-pack/plugin/deprecation/build.gradle @@ -13,8 +13,4 @@ dependencies { compileOnly "org.elasticsearch.plugin:x-pack-core:${version}" } -run { - plugin xpackModule('core') -} - integTest.enabled = false diff --git a/x-pack/plugin/graph/build.gradle b/x-pack/plugin/graph/build.gradle index f1bac2e54d4..e7b0b44fd65 100644 --- a/x-pack/plugin/graph/build.gradle +++ b/x-pack/plugin/graph/build.gradle @@ -24,8 +24,4 @@ gradle.projectsEvaluated { .each { check.dependsOn it.check } } -run { - plugin xpackModule('core') -} - integTest.enabled = false diff --git a/x-pack/plugin/ilm/build.gradle b/x-pack/plugin/ilm/build.gradle index 71def893781..e6962e3c3bf 100644 --- a/x-pack/plugin/ilm/build.gradle +++ b/x-pack/plugin/ilm/build.gradle @@ -29,6 +29,3 @@ gradle.projectsEvaluated { integTest.enabled = false -run { - plugin xpackModule('core') -} diff --git a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java index 40f322945dc..8e74426e0b3 100644 --- a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java +++ b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -137,6 +137,7 @@ public class PermissionsIT extends ESRestTestCase { * Tests when the user is limited by alias of an index is able to write to index * which was rolled over by an ILM policy. 
*/ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41440") public void testWhenUserLimitedByOnlyAliasOfIndexCanWriteToIndexWhichWasRolledoverByILMPolicy() throws IOException, InterruptedException { /* diff --git a/x-pack/plugin/logstash/build.gradle b/x-pack/plugin/logstash/build.gradle index 1057a1c8526..476d3f17cad 100644 --- a/x-pack/plugin/logstash/build.gradle +++ b/x-pack/plugin/logstash/build.gradle @@ -15,8 +15,4 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } -run { - plugin xpackModule('core') -} - integTest.enabled = false diff --git a/x-pack/plugin/ml/build.gradle b/x-pack/plugin/ml/build.gradle index 6ca1a44c145..9bd4d445892 100644 --- a/x-pack/plugin/ml/build.gradle +++ b/x-pack/plugin/ml/build.gradle @@ -9,13 +9,26 @@ esplugin { extendedPlugins = ['x-pack-core', 'lang-painless'] } + +repositories { + ivy { + name "ml-cpp" + url "https://prelert-artifacts.s3.amazonaws.com" + metadataSources { + // no repository metadata, look directly for the artifact + artifact() + } + patternLayout { + artifact "maven/org/elasticsearch/ml/ml-cpp/[revision]/ml-cpp-[revision].[ext]" + } + } +} + configurations { nativeBundle { resolutionStrategy.dependencySubstitution { if (findProject(':ml-cpp') != null) { substitute module("org.elasticsearch.ml:ml-cpp") with project(":ml-cpp") - } else { - substitute module("org.elasticsearch.ml:ml-cpp") with project("${project.path}:cpp-snapshot") } } } @@ -81,10 +94,6 @@ project.afterEvaluate { } } -run { - plugin xpackModule('core') -} - // xpack modules are installed in real clusters as the meta plugin, so // installing them as individual plugins for integ tests doesn't make sense, // so we disable integ tests diff --git a/x-pack/plugin/ml/cpp-snapshot/.gitignore b/x-pack/plugin/ml/cpp-snapshot/.gitignore deleted file mode 100644 index 16d3c4dbbfe..00000000000 --- a/x-pack/plugin/ml/cpp-snapshot/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.cache diff --git a/x-pack/plugin/ml/cpp-snapshot/build.gradle b/x-pack/plugin/ml/cpp-snapshot/build.gradle deleted file mode 100644 index e5b55293159..00000000000 --- a/x-pack/plugin/ml/cpp-snapshot/build.gradle +++ /dev/null @@ -1,55 +0,0 @@ -import org.elasticsearch.gradle.VersionProperties - -apply plugin: 'distribution' - -ext.version = VersionProperties.elasticsearch - -// This project pulls a snapshot version of the ML cpp artifacts and sets that as the artifact -// for this project so it can be used with dependency substitution. 
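For context on what is being deleted here: the cpp-snapshot project cached the ML C++ zip locally and used the S3 ETag header as an MD5 fingerprint to decide whether a re-download was needed; the ivy repository added to x-pack/plugin/ml/build.gradle above replaces all of this with Gradle's own dependency resolution and caching. The same check, re-expressed as a standalone Java sketch (the URL version and cache paths are placeholders):

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    public class SnapshotCacheSketch {
        public static void main(String[] args) throws Exception {
            String zipUrl = "https://prelert-artifacts.s3.amazonaws.com/maven/org/elasticsearch/ml/ml-cpp/8.0.0/ml-cpp-8.0.0.zip"; // placeholder version
            Path zip = Paths.get(".cache/ml-cpp.zip");
            Path md5 = Paths.get(".cache/ml-cpp.zip.md5");

            // HEAD first: cheap existence check that also yields the remote hash
            HttpURLConnection conn = (HttpURLConnection) new URL(zipUrl).openConnection();
            conn.setRequestMethod("HEAD");
            if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
                throw new IllegalStateException("ML cpp snapshot does not exist");
            }
            String remoteMd5 = conn.getHeaderField("ETag");

            if (Files.exists(zip) && Files.exists(md5)
                    && remoteMd5.equals(new String(Files.readAllBytes(md5), StandardCharsets.UTF_8))) {
                System.out.println("Using cached ML snapshot");
                return; // cache hit, skip the download
            }

            Files.createDirectories(zip.getParent());
            try (InputStream in = new URL(zipUrl).openStream()) {
                Files.copy(in, zip, StandardCopyOption.REPLACE_EXISTING);
            }
            Files.write(md5, remoteMd5.getBytes(StandardCharsets.UTF_8));
        }
    }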
- -void getZip(File snapshotZip) { - String zipUrl = "http://prelert-artifacts.s3.amazonaws.com/maven/org/elasticsearch/ml/ml-cpp/${version}/ml-cpp-${version}.zip" - File snapshotMd5 = new File(snapshotZip.toString() + '.md5') - HttpURLConnection conn = (HttpURLConnection) new URL(zipUrl).openConnection(); - - // do a HEAD first to check the zip hash against the local file - conn.setRequestMethod('HEAD'); - if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) { - throw new GradleException('ML cpp snapshot does not exist') - } - - String remoteMd5 = conn.getHeaderField('ETag') - if (snapshotZip.exists()) { - // do a HEAD first to check the zip hash against the local file - String localMd5 = snapshotMd5.getText('UTF-8') - if (remoteMd5.equals(localMd5)) { - logger.info('Using cached ML snapshot') - return - } - } - - snapshotZip.bytes = new URL(zipUrl).bytes - snapshotMd5.setText(remoteMd5, 'UTF-8') -} - -File snapshotZip = new File(projectDir, ".cache/ml-cpp-${version}.zip") -task downloadMachineLearningSnapshot { - onlyIf { - // skip if ml-cpp is being built locally - findProject(':ml-cpp') == null && - // skip for offline builds - just rely on the artifact already having been downloaded before here - project.gradle.startParameter.isOffline() == false - } - doFirst { - snapshotZip.parentFile.mkdirs() - getZip(snapshotZip) - } -} - -task cleanCache(type: Delete) { - delete "${projectDir}/.cache" -} - -artifacts { - 'default' file: snapshotZip, name: 'ml-cpp', type: 'zip', builtBy: downloadMachineLearningSnapshot -} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 281a374b97b..67acc1d0d67 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -31,6 +31,8 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -188,6 +190,7 @@ import org.elasticsearch.xpack.ml.notifications.Auditor; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.process.NativeController; import org.elasticsearch.xpack.ml.process.NativeControllerHolder; +import org.elasticsearch.xpack.ml.process.NativeStorageProvider; import org.elasticsearch.xpack.ml.rest.RestDeleteExpiredDataAction; import org.elasticsearch.xpack.ml.rest.RestFindFileStructureAction; import org.elasticsearch.xpack.ml.rest.RestMlInfoAction; @@ -293,6 +296,10 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public static final Setting MAX_OPEN_JOBS_PER_NODE = Setting.intSetting("xpack.ml.max_open_jobs", 20, 1, MAX_MAX_OPEN_JOBS_PER_NODE, Property.Dynamic, Property.NodeScope); + // Undocumented setting for integration test purposes + public static final Setting MIN_DISK_SPACE_OFF_HEAP = + Setting.byteSizeSetting("xpack.ml.min_disk_space_off_heap", new ByteSizeValue(5, ByteSizeUnit.GB), Setting.Property.NodeScope); + private static final Logger logger = 
LogManager.getLogger(XPackPlugin.class); private final Settings settings; @@ -333,7 +340,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu AutodetectBuilder.DONT_PERSIST_MODEL_STATE_SETTING, AutodetectBuilder.MAX_ANOMALY_RECORDS_SETTING_DYNAMIC, MAX_OPEN_JOBS_PER_NODE, - AutodetectProcessManager.MIN_DISK_SPACE_OFF_HEAP, + MIN_DISK_SPACE_OFF_HEAP, MlConfigMigrationEligibilityCheck.ENABLE_CONFIG_MIGRATION)); } @@ -424,6 +431,8 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu JobDataCountsPersister jobDataCountsPersister = new JobDataCountsPersister(client); JobResultsPersister jobResultsPersister = new JobResultsPersister(client); + NativeStorageProvider nativeStorageProvider = new NativeStorageProvider(environment, MIN_DISK_SPACE_OFF_HEAP.get(settings)); + AutodetectProcessFactory autodetectProcessFactory; NormalizerProcessFactory normalizerProcessFactory; if (MachineLearningField.AUTODETECT_PROCESS.get(settings) && MachineLearningFeatureSet.isRunningOnMlPlatform(true)) { @@ -454,8 +463,8 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu NormalizerFactory normalizerFactory = new NormalizerFactory(normalizerProcessFactory, threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)); AutodetectProcessManager autodetectProcessManager = new AutodetectProcessManager(env, settings, client, threadPool, - jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, autodetectProcessFactory, - normalizerFactory, xContentRegistry, auditor, clusterService); + xContentRegistry, auditor, clusterService, jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, + autodetectProcessFactory, normalizerFactory, nativeStorageProvider); this.autodetectProcessManager.set(autodetectProcessManager); DatafeedJobBuilder datafeedJobBuilder = new DatafeedJobBuilder(client, settings, xContentRegistry, auditor, System::currentTimeMillis); @@ -472,8 +481,8 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu new InvalidLicenseEnforcer(getLicenseState(), threadPool, datafeedManager, autodetectProcessManager); enforcer.listenForLicenseStateChanges(); - // run node startup tasks - autodetectProcessManager.onNodeStartup(); + // Perform node startup operations + nativeStorageProvider.cleanupLocalTmpStorageInCaseOfUncleanShutdown(); return Arrays.asList( mlLifeCycleService, @@ -488,7 +497,8 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu datafeedManager, auditor, new MlAssignmentNotifier(settings, auditor, threadPool, client, clusterService), - memoryTracker + memoryTracker, + nativeStorageProvider ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java index ac3d41f26e0..14e434099f8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportForecastJobAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.job.process.autodetect.params.ForecastParams; +import org.elasticsearch.xpack.ml.process.NativeStorageProvider; import 
java.nio.file.Path; import java.util.List; @@ -38,16 +39,19 @@ public class TransportForecastJobAction extends TransportJobTaskAction MIN_DISK_SPACE_OFF_HEAP = - Setting.byteSizeSetting("xpack.ml.min_disk_space_off_heap", new ByteSizeValue(5, ByteSizeUnit.GB), Property.NodeScope); - private static final Logger logger = LogManager.getLogger(AutodetectProcessManager.class); private final Client client; @@ -111,9 +104,6 @@ public class AutodetectProcessManager implements ClusterStateListener { private NativeStorageProvider nativeStorageProvider; private final ConcurrentMap processByAllocation = new ConcurrentHashMap<>(); - // a map that manages the allocation of temporary space to jobs - private final ConcurrentMap nativeTmpStorage = new ConcurrentHashMap<>(); - private volatile int maxAllowedRunningJobs; private final NamedXContentRegistry xContentRegistry; @@ -123,10 +113,10 @@ public class AutodetectProcessManager implements ClusterStateListener { private volatile boolean upgradeInProgress; public AutodetectProcessManager(Environment environment, Settings settings, Client client, ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, Auditor auditor, ClusterService clusterService, JobManager jobManager, JobResultsProvider jobResultsProvider, JobResultsPersister jobResultsPersister, - JobDataCountsPersister jobDataCountsPersister, - AutodetectProcessFactory autodetectProcessFactory, NormalizerFactory normalizerFactory, - NamedXContentRegistry xContentRegistry, Auditor auditor, ClusterService clusterService) { + JobDataCountsPersister jobDataCountsPersister, AutodetectProcessFactory autodetectProcessFactory, + NormalizerFactory normalizerFactory, NativeStorageProvider nativeStorageProvider) { this.environment = environment; this.client = client; this.threadPool = threadPool; @@ -139,7 +129,7 @@ public class AutodetectProcessManager implements ClusterStateListener { this.jobResultsPersister = jobResultsPersister; this.jobDataCountsPersister = jobDataCountsPersister; this.auditor = auditor; - this.nativeStorageProvider = new NativeStorageProvider(environment, MIN_DISK_SPACE_OFF_HEAP.get(settings)); + this.nativeStorageProvider = Objects.requireNonNull(nativeStorageProvider); clusterService.addListener(this); clusterService.getClusterSettings() .addSettingsUpdateConsumer(MachineLearning.MAX_OPEN_JOBS_PER_NODE, this::setMaxAllowedRunningJobs); @@ -149,14 +139,6 @@ public class AutodetectProcessManager implements ClusterStateListener { this.maxAllowedRunningJobs = maxAllowedRunningJobs; } - public void onNodeStartup() { - try { - nativeStorageProvider.cleanupLocalTmpStorageInCaseOfUncleanShutdown(); - } catch (Exception e) { - logger.warn("Failed to cleanup native storage from previous invocation", e); - } - } - public synchronized void closeAllJobsOnThisNode(String reason) { int numJobs = processByAllocation.size(); if (numJobs != 0) { @@ -283,28 +265,6 @@ public class AutodetectProcessManager implements ClusterStateListener { }); } - /** - * Request temporary storage to be used for the job - * - * @param jobTask The job task - * @param requestedSize requested size - * @return a Path to local storage or null if storage is not available - */ - public Path tryGetTmpStorage(JobTask jobTask, ByteSizeValue requestedSize) { - String jobId = jobTask.getJobId(); - Path path = nativeTmpStorage.get(jobId); - if (path == null) { - path = nativeStorageProvider.tryGetLocalTmpStorage(jobId, requestedSize); - if (path != null) { - nativeTmpStorage.put(jobId, path); - } - } else if 
(!nativeStorageProvider.localTmpStorageHasEnoughSpace(path, requestedSize)) { - // the previous tmp location ran out of disk space, do not allow further usage - return null; - } - return path; - } - /** * Do a forecast for the running job. * @@ -602,7 +562,7 @@ public class AutodetectProcessManager implements ClusterStateListener { } setJobState(jobTask, JobState.FAILED, reason); try { - removeTmpStorage(jobTask.getJobId()); + nativeStorageProvider.cleanupLocalTmpStorage(jobTask.getDescription()); } catch (IOException e) { logger.error(new ParameterizedMessage("[{}] Failed to delete temporary files", jobTask.getJobId()), e); } @@ -666,7 +626,7 @@ public class AutodetectProcessManager implements ClusterStateListener { } // delete any tmp storage try { - removeTmpStorage(jobId); + nativeStorageProvider.cleanupLocalTmpStorage(jobTask.getDescription()); } catch (IOException e) { logger.error(new ParameterizedMessage("[{}]Failed to delete temporary files", jobId), e); } @@ -760,13 +720,6 @@ public class AutodetectProcessManager implements ClusterStateListener { return Optional.of(new Tuple<>(communicator.getDataCounts(), communicator.getModelSizeStats())); } - private void removeTmpStorage(String jobId) throws IOException { - Path path = nativeTmpStorage.get(jobId); - if (path != null) { - nativeStorageProvider.cleanupLocalTmpStorage(path); - } - } - ExecutorService createAutodetectExecutorService(ExecutorService executorService) { AutodetectWorkerExecutorService autoDetectWorkerExecutor = new AutodetectWorkerExecutorService(threadPool.getThreadContext()); executorService.submit(autoDetectWorkerExecutor::start);
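The net effect of the hunks around this point: AutodetectProcessManager no longer keeps its own jobId-to-Path map, and NativeStorageProvider (next diff) becomes the single owner of that bookkeeping, keyed by an opaque resource id. Reduced to its core, the ledger pattern looks like this (hypothetical sketch; the space check and allocation are stubbed):

    import java.nio.file.Path;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    final class StorageLedgerSketch {
        private final ConcurrentMap<String, Path> allocated = new ConcurrentHashMap<>();

        // Mirrors tryGetLocalTmpStorage: an existing allocation is re-validated,
        // and an exhausted one is reported as unavailable rather than reused.
        Path getOrAllocate(String id, long requestedBytes) {
            Path existing = allocated.get(id);
            if (existing != null && hasEnoughSpace(existing, requestedBytes) == false) {
                return null; // previous tmp location ran out of disk space
            }
            Path path = tryAllocate(id, requestedBytes);
            if (path != null) {
                allocated.put(id, path);
            }
            return path;
        }

        // Mirrors cleanupLocalTmpStorage(String): drop the ledger entry first,
        // then let the caller delete the directory it points at.
        Path release(String id) {
            return allocated.remove(id);
        }

        private boolean hasEnoughSpace(Path path, long requestedBytes) {
            return true; // stub: the real class checks usable disk space
        }

        private Path tryAllocate(String id, long requestedBytes) {
            return null; // stub: the real class resolves ml-local-data/tmp/<id>
        }
    }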
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java index 9f366ab1131..93432af962f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java @@ -15,6 +15,8 @@ import org.elasticsearch.env.Environment; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; /** * Provide storage for native components. @@ -23,7 +25,6 @@ public class NativeStorageProvider { private static final Logger LOGGER = LogManager.getLogger(NativeStorageProvider.class); - private static final String LOCAL_STORAGE_SUBFOLDER = "ml-local-data"; private static final String LOCAL_STORAGE_TMP_FOLDER = "tmp"; @@ -32,6 +33,9 @@ public class NativeStorageProvider { // do not allow any usage below this threshold private final ByteSizeValue minLocalStorageAvailable; + // A map to keep track of allocated native storage by resource id + private final ConcurrentMap<String, Path> allocatedStorage = new ConcurrentHashMap<>(); + public NativeStorageProvider(Environment environment, ByteSizeValue minDiskSpaceOffHeap) { this.environment = environment; this.minLocalStorageAvailable = minDiskSpaceOffHeap; @@ -44,12 +48,14 @@ public class NativeStorageProvider { * unclean node shutdown or broken clients. * * Do not call while there are running jobs. - * - * @throws IOException if cleanup fails */ - public void cleanupLocalTmpStorageInCaseOfUncleanShutdown() throws IOException { - for (Path p : environment.dataFiles()) { - IOUtils.rm(p.resolve(LOCAL_STORAGE_SUBFOLDER).resolve(LOCAL_STORAGE_TMP_FOLDER)); + public void cleanupLocalTmpStorageInCaseOfUncleanShutdown() { + try { + for (Path p : environment.dataFiles()) { + IOUtils.rm(p.resolve(LOCAL_STORAGE_SUBFOLDER).resolve(LOCAL_STORAGE_TMP_FOLDER)); + } + } catch (Exception e) { + LOGGER.warn("Failed to cleanup native storage from previous invocation", e); + } } @@ -61,17 +67,28 @@ public class NativeStorageProvider { * @return Path for temporary storage if available, null otherwise */ public Path tryGetLocalTmpStorage(String uniqueIdentifier, ByteSizeValue requestedSize) { + Path path = allocatedStorage.get(uniqueIdentifier); + if (path != null && localTmpStorageHasEnoughSpace(path, requestedSize) == false) { + LOGGER.debug("Previous tmp storage for [{}] run out, returning null", uniqueIdentifier); + return null; + } else { + path = tryAllocateStorage(uniqueIdentifier, requestedSize); + } + return path; + } + + private Path tryAllocateStorage(String uniqueIdentifier, ByteSizeValue requestedSize) { for (Path path : environment.dataFiles()) { try { if (getUsableSpace(path) >= requestedSize.getBytes() + minLocalStorageAvailable.getBytes()) { Path tmpDirectory = path.resolve(LOCAL_STORAGE_SUBFOLDER).resolve(LOCAL_STORAGE_TMP_FOLDER).resolve(uniqueIdentifier); Files.createDirectories(tmpDirectory); + allocatedStorage.put(uniqueIdentifier, tmpDirectory); return tmpDirectory; } } catch (IOException e) { LOGGER.debug("Failed to obtain information about path [{}]: {}", path, e); } - } LOGGER.debug("Failed to find native storage for [{}], returning null", uniqueIdentifier); return null; @@ -96,17 +113,18 @@ public class NativeStorageProvider { /** * Delete temporary storage, previously allocated * - * @param path - * Path to temporary storage - * @throws IOException - * if path can not be cleaned up + * @param uniqueIdentifier the identifier to which storage was allocated + * @throws IOException if path can not be cleaned up */ - public void cleanupLocalTmpStorage(Path path) throws IOException { - // do not allow to breakout from the tmp storage provided - Path realPath = path.toAbsolutePath(); - for (Path p : environment.dataFiles()) { - if (realPath.startsWith(p.resolve(LOCAL_STORAGE_SUBFOLDER).resolve(LOCAL_STORAGE_TMP_FOLDER))) { - IOUtils.rm(path); + public void cleanupLocalTmpStorage(String uniqueIdentifier) throws IOException { + Path path = allocatedStorage.remove(uniqueIdentifier); + if (path != null) { + // do not allow to breakout from the tmp storage provided + Path realPath = path.toAbsolutePath(); + for (Path p : environment.dataFiles()) { + if (realPath.startsWith(p.resolve(LOCAL_STORAGE_SUBFOLDER).resolve(LOCAL_STORAGE_TMP_FOLDER))) { + IOUtils.rm(path); + } } } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index 9a147dfd1bc..5f1ce1ddf7d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -54,6 +54,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams;
import org.elasticsearch.xpack.ml.job.process.autodetect.params.TimeRange; import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerFactory; import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.process.NativeStorageProvider; import org.junit.Before; import org.mockito.ArgumentCaptor; @@ -121,6 +122,7 @@ public class AutodetectProcessManagerTests extends ESTestCase { private Auditor auditor; private ClusterState clusterState; private ClusterService clusterService; + private NativeStorageProvider nativeStorageProvider; private DataCounts dataCounts = new DataCounts("foo"); private ModelSizeStats modelSizeStats = new ModelSizeStats.Builder("foo").build(); @@ -159,6 +161,7 @@ public class AutodetectProcessManagerTests extends ESTestCase { clusterState = mock(ClusterState.class); when(clusterState.getMetaData()).thenReturn(metaData); when(clusterState.metaData()).thenReturn(metaData); + nativeStorageProvider = mock(NativeStorageProvider.class); doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") @@ -685,9 +688,8 @@ public class AutodetectProcessManagerTests extends ESTestCase { private AutodetectProcessManager createManager(Settings settings) { return new AutodetectProcessManager(environment, settings, - client, threadPool, jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, - autodetectFactory, normalizerFactory, - new NamedXContentRegistry(Collections.emptyList()), auditor, clusterService); + client, threadPool, new NamedXContentRegistry(Collections.emptyList()), auditor, clusterService, jobManager, jobResultsProvider, + jobResultsPersister, jobDataCountsPersister, autodetectFactory, normalizerFactory, nativeStorageProvider); } private AutodetectProcessManager createSpyManagerAndCallProcessData(String jobId) { AutodetectProcessManager manager = createSpyManager(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeStorageProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeStorageProviderTests.java index fd87e29387e..4a2b7efc1b5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeStorageProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeStorageProviderTests.java @@ -89,7 +89,7 @@ public class NativeStorageProviderTests extends ESTestCase { Assert.assertTrue(Files.isRegularFile(testFile)); // the native component should cleanup itself, but assume it has crashed - storageProvider.cleanupLocalTmpStorage(path); + storageProvider.cleanupLocalTmpStorage(id); Assert.assertFalse(Files.exists(testFile)); Assert.assertFalse(Files.exists(path)); } diff --git a/x-pack/plugin/monitoring/build.gradle b/x-pack/plugin/monitoring/build.gradle index 5d17b81e2fa..b2e0c930e0d 100644 --- a/x-pack/plugin/monitoring/build.gradle +++ b/x-pack/plugin/monitoring/build.gradle @@ -45,10 +45,6 @@ dependencyLicenses { mapping from: /commons-.*/, to: 'commons' // pulled in by rest client } -run { - plugin xpackModule('core') -} - // xpack modules are installed in real clusters as the meta plugin, so // installing them as individual plugins for integ tests doesn't make sense, // so we disable integ tests diff --git a/x-pack/plugin/rollup/build.gradle b/x-pack/plugin/rollup/build.gradle index 75fd22abacc..d159f3334b9 100644 --- a/x-pack/plugin/rollup/build.gradle +++ b/x-pack/plugin/rollup/build.gradle @@ -21,8 +21,4 @@ dependencies { testCompile project(path: xpackModule('core'), 
configuration: 'testArtifacts') } -run { - plugin xpackModule('core') -} - integTest.enabled = false diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java index e900d76c849..4a8d007e3b8 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.rollup; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; @@ -61,9 +62,9 @@ public class RollupResponseTranslator { * Verifies a live-only search response. Essentially just checks for failure then returns * the response since we have no work to do */ - public static SearchResponse verifyResponse(MultiSearchResponse.Item normalResponse) { + public static SearchResponse verifyResponse(MultiSearchResponse.Item normalResponse) throws Exception { if (normalResponse.isFailure()) { - throw new RuntimeException(normalResponse.getFailureMessage(), normalResponse.getFailure()); + throw normalResponse.getFailure(); } return normalResponse.getResponse(); } @@ -77,16 +78,30 @@ public class RollupResponseTranslator { * on the translation conventions */ public static SearchResponse translateResponse(MultiSearchResponse.Item[] rolledMsearch, - InternalAggregation.ReduceContext reduceContext) { + InternalAggregation.ReduceContext reduceContext) throws Exception { - List responses = Arrays.stream(rolledMsearch) - .map(item -> { - if (item.isFailure()) { - throw new RuntimeException(item.getFailureMessage(), item.getFailure()); - } - return item.getResponse(); - }).collect(Collectors.toList()); + assert rolledMsearch.length > 0; + List responses = new ArrayList<>(); + for (MultiSearchResponse.Item item : rolledMsearch) { + if (item.isFailure()) { + Exception e = item.getFailure(); + // If an index was deleted after execution, give a hint to the user that this is a transient error + if (e instanceof IndexNotFoundException) { + throw new ResourceNotFoundException("Index [" + ((IndexNotFoundException) e).getIndex().getName() + + "] was not found, likely because it was deleted while the request was in-flight. 
" + + "Rollup does not support partial search results, please try the request again."); + } + + // Otherwise just throw + throw e; + } + + // No error, add to responses + responses.add(item.getResponse()); + } + + assert responses.size() > 0; return doCombineResponse(null, responses, reduceContext); } @@ -187,48 +202,45 @@ public class RollupResponseTranslator { * @param msearchResponses The responses from the msearch, where the first response is the live-index response */ public static SearchResponse combineResponses(MultiSearchResponse.Item[] msearchResponses, - InternalAggregation.ReduceContext reduceContext) { - boolean liveMissing = false; + InternalAggregation.ReduceContext reduceContext) throws Exception { + assert msearchResponses.length >= 2; - // The live response is always first - MultiSearchResponse.Item liveResponse = msearchResponses[0]; - if (liveResponse.isFailure()) { - Exception e = liveResponse.getFailure(); - // If we have a rollup response we can tolerate a missing live response - if (e instanceof IndexNotFoundException) { - logger.warn("\"Live\" index not found during rollup search.", e); - liveMissing = true; - } else { - throw new RuntimeException(liveResponse.getFailureMessage(), liveResponse.getFailure()); + boolean first = true; + SearchResponse liveResponse = null; + List rolledResponses = new ArrayList<>(); + for (MultiSearchResponse.Item item : msearchResponses) { + if (item.isFailure()) { + Exception e = item.getFailure(); + + // If an index was deleted after execution, give a hint to the user that this is a transient error + if (e instanceof IndexNotFoundException) { + throw new ResourceNotFoundException("Index [" + ((IndexNotFoundException) e).getIndex() + "] was not found, " + + "likely because it was deleted while the request was in-flight. Rollup does not support partial search results, " + + "please try the request again.", e); + } + + // Otherwise just throw + throw e; } - } - List rolledResponses = Arrays.stream(msearchResponses) - .skip(1) - .map(item -> { - if (item.isFailure()) { - Exception e = item.getFailure(); - // If we have a normal response we can tolerate a missing rollup response, although it theoretically - // should be handled by a different code path (verifyResponse) - if (e instanceof IndexNotFoundException) { - logger.warn("Rollup index not found during rollup search.", e); - } else { - throw new RuntimeException(item.getFailureMessage(), item.getFailure()); - } - return null; - } else { - return item.getResponse(); - } - }).filter(Objects::nonNull).collect(Collectors.toList()); - // If we only have a live index left, process it directly - if (rolledResponses.isEmpty() && liveMissing == false) { - return verifyResponse(liveResponse); - } else if (rolledResponses.isEmpty() && liveMissing) { - throw new RuntimeException("No indices (live or rollup) found during rollup search"); + // No error, add to responses + if (first) { + liveResponse = item.getResponse(); + } else { + rolledResponses.add(item.getResponse()); + } + first = false; } - return doCombineResponse(liveResponse.getResponse(), rolledResponses, reduceContext); + // If we only have a live index left, just return it directly. 
We know it can't be an error already + if (rolledResponses.isEmpty() && liveResponse != null) { + return liveResponse; + } else if (rolledResponses.isEmpty()) { + throw new ResourceNotFoundException("No indices (live or rollup) found during rollup search"); + } + + return doCombineResponse(liveResponse, rolledResponses, reduceContext); } private static SearchResponse doCombineResponse(SearchResponse liveResponse, List rolledResponses, diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java index 414a0d08ef3..2a1308353d6 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java @@ -111,7 +111,7 @@ public class TransportRollupSearchAction extends TransportAction> doProcess(SearchResponse searchResponse) { final CompositeAggregation response = searchResponse.getAggregations().get(AGGREGATION_NAME); + if (response.getBuckets().isEmpty()) { + // do not reset the position as we want to continue from where we stopped + return new IterationResult<>(Collections.emptyList(), getPosition(), true); + } + return new IterationResult<>( IndexerUtils.processBuckets(response, job.getConfig().getRollupIndex(), getStats(), job.getConfig().getGroupConfig(), job.getConfig().getId(), upgradedDocumentID.get()), diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java index 849461f1b62..6c20b90f5ae 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.CheckedConsumer; @@ -109,14 +110,13 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { public void testRollupFailure() { MultiSearchResponse.Item[] failure = new MultiSearchResponse.Item[]{ - new MultiSearchResponse.Item(null, new IndexNotFoundException("live missing")), new MultiSearchResponse.Item(null, new RuntimeException("rollup failure"))}; BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); ScriptService scriptService = mock(ScriptService.class); Exception e = expectThrows(RuntimeException.class, - () -> RollupResponseTranslator.combineResponses(failure, + () -> RollupResponseTranslator.translateResponse(failure, new InternalAggregation.ReduceContext(bigArrays, scriptService, true))); assertThat(e.getMessage(), equalTo("rollup failure")); } @@ -129,13 +129,14 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); ScriptService scriptService = mock(ScriptService.class); - Exception e = 
expectThrows(RuntimeException.class, + ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> RollupResponseTranslator.combineResponses(failure, new InternalAggregation.ReduceContext(bigArrays, scriptService, true))); - assertThat(e.getMessage(), equalTo("No indices (live or rollup) found during rollup search")); + assertThat(e.getMessage(), equalTo("Index [[foo]] was not found, likely because it was deleted while the request was in-flight. " + + "Rollup does not support partial search results, please try the request again.")); } - public void testMissingLiveIndex() { + public void testMissingLiveIndex() throws Exception { SearchResponse responseWithout = mock(SearchResponse.class); when(responseWithout.getTook()).thenReturn(new TimeValue(100)); List aggTree = new ArrayList<>(1); @@ -174,16 +175,13 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); ScriptService scriptService = mock(ScriptService.class); - SearchResponse response = RollupResponseTranslator.combineResponses(msearch, - new InternalAggregation.ReduceContext(bigArrays, scriptService, true)); - assertNotNull(response); - Aggregations responseAggs = response.getAggregations(); - assertNotNull(responseAggs); - Avg avg = responseAggs.get("foo"); - assertThat(avg.getValue(), equalTo(5.0)); + ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> RollupResponseTranslator.combineResponses(msearch, + new InternalAggregation.ReduceContext(bigArrays, scriptService, true))); + assertThat(e.getMessage(), equalTo("Index [[foo]] was not found, likely because it was deleted while the request was in-flight. " + + "Rollup does not support partial search results, please try the request again.")); } - public void testRolledMissingAggs() { + public void testRolledMissingAggs() throws Exception { SearchResponse responseWithout = mock(SearchResponse.class); when(responseWithout.getTook()).thenReturn(new TimeValue(100)); @@ -191,13 +189,12 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { when(responseWithout.getAggregations()).thenReturn(mockAggsWithout); MultiSearchResponse.Item[] msearch = new MultiSearchResponse.Item[]{ - new MultiSearchResponse.Item(null, new IndexNotFoundException("foo")), new MultiSearchResponse.Item(responseWithout, null)}; BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); ScriptService scriptService = mock(ScriptService.class); - SearchResponse response = RollupResponseTranslator.combineResponses(msearch, + SearchResponse response = RollupResponseTranslator.translateResponse(msearch, new InternalAggregation.ReduceContext(bigArrays, scriptService, true)); assertNotNull(response); Aggregations responseAggs = response.getAggregations(); @@ -214,12 +211,13 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); ScriptService scriptService = mock(ScriptService.class); - SearchResponse finalResponse = RollupResponseTranslator.combineResponses(msearch, - new InternalAggregation.ReduceContext(bigArrays, scriptService, true)); - assertThat(finalResponse, equalTo(response)); + ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> RollupResponseTranslator.combineResponses(msearch, + new 
InternalAggregation.ReduceContext(bigArrays, scriptService, true))); + assertThat(e.getMessage(), equalTo("Index [[foo]] was not found, likely because it was deleted while the request was in-flight. " + + "Rollup does not support partial search results, please try the request again.")); } - public void testVerifyNormal() { + public void testVerifyNormal() throws Exception { SearchResponse response = mock(SearchResponse.class); MultiSearchResponse.Item item = new MultiSearchResponse.Item(response, null); @@ -234,7 +232,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { assertThat(e.getMessage(), equalTo("no such index [foo]")); } - public void testTranslateRollup() { + public void testTranslateRollup() throws Exception { SearchResponse response = mock(SearchResponse.class); when(response.getTook()).thenReturn(new TimeValue(100)); List aggTree = new ArrayList<>(1); @@ -285,9 +283,10 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { ScriptService scriptService = mock(ScriptService.class); InternalAggregation.ReduceContext context = new InternalAggregation.ReduceContext(bigArrays, scriptService, true); - Exception e = expectThrows(RuntimeException.class, + ResourceNotFoundException e = expectThrows(ResourceNotFoundException.class, () -> RollupResponseTranslator.translateResponse(new MultiSearchResponse.Item[]{missing}, context)); - assertThat(e.getMessage(), equalTo("no such index [foo]")); + assertThat(e.getMessage(), equalTo("Index [foo] was not found, likely because it was deleted while the request was in-flight. " + + "Rollup does not support partial search results, please try the request again.")); } public void testMissingFilter() { @@ -350,7 +349,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { equalTo("Expected [filter_foo] to be a FilterAggregation, but was [InternalMax]")); } - public void testSimpleReduction() { + public void testSimpleReduction() throws Exception { SearchResponse protoResponse = mock(SearchResponse.class); when(protoResponse.getTook()).thenReturn(new TimeValue(100)); List protoAggTree = new ArrayList<>(1); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java index a795edca83e..448e901997f 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java @@ -584,7 +584,7 @@ public class SearchActionTests extends ESTestCase { assertThat(result.getJobCaps().size(), equalTo(1)); } - public void testLiveOnlyProcess() { + public void testLiveOnlyProcess() throws Exception { String[] indices = new String[]{"foo"}; IndexMetaData indexMetaData = mock(IndexMetaData.class); ImmutableOpenMap.Builder meta = ImmutableOpenMap.builder(1); @@ -601,7 +601,7 @@ public class SearchActionTests extends ESTestCase { assertThat(r, equalTo(response)); } - public void testRollupOnly() throws IOException { + public void testRollupOnly() throws Exception { String[] indices = new String[]{"foo"}; String jobName = randomAlphaOfLength(5); @@ -701,7 +701,7 @@ public class SearchActionTests extends ESTestCase { assertThat(e.getMessage(), equalTo("MSearch response was empty, cannot unroll RollupSearch results")); } - public void testBoth() throws IOException { + public void testBoth() throws Exception { String[] indices = 
new String[]{"foo", "bar"}; String jobName = randomAlphaOfLength(5); diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 6f99fe87bf4..e343b5906e7 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -302,10 +302,6 @@ if (project.runtimeJavaVersion > JavaVersion.VERSION_1_8) { ) } -run { - plugin xpackModule('core') -} - test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertGenUtils.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertGenUtils.java index af6a7131056..bc2b27df580 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertGenUtils.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertGenUtils.java @@ -42,7 +42,6 @@ import javax.security.auth.x500.X500Principal; import java.io.IOException; import java.math.BigInteger; import java.net.InetAddress; -import java.net.SocketException; import java.security.KeyPair; import java.security.KeyPairGenerator; import java.security.KeyStore; @@ -268,7 +267,7 @@ public class CertGenUtils { /** * Converts the {@link InetAddress} objects into a {@link GeneralNames} object that is used to represent subject alternative names. */ - public static GeneralNames getSubjectAlternativeNames(boolean resolveName, Set addresses) throws SocketException { + public static GeneralNames getSubjectAlternativeNames(boolean resolveName, Set addresses) throws IOException { Set generalNameList = new HashSet<>(); for (InetAddress address : addresses) { if (address.isAnyLocalAddress()) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java index b76d3a48035..1fe3ed67f73 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java @@ -14,6 +14,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.collect.Tuple; @@ -35,6 +36,7 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine.EmptyAuthorizationInfo; import org.elasticsearch.xpack.core.security.support.Exceptions; import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; @@ -43,7 +45,6 @@ import org.elasticsearch.xpack.security.audit.AuditTrail; import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.audit.AuditUtil; import org.elasticsearch.xpack.security.authc.support.RealmUserLookup; -import 
org.elasticsearch.xpack.core.security.authz.AuthorizationEngine.EmptyAuthorizationInfo; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.ArrayList; @@ -484,6 +485,13 @@ public class AuthenticationService { final String cause = tuple.v2() == null ? "" : " (Caused by " + tuple.v2() + ")"; logger.warn("Authentication to realm {} failed - {}{}", realm.name(), message, cause); }); + List unlicensedRealms = realms.getUnlicensedRealms(); + if (unlicensedRealms.isEmpty() == false) { + logger.warn("Authentication failed using realms [{}]." + + " Realms [{}] were skipped because they are not permitted on the current license", + Strings.collectionToCommaDelimitedString(defaultOrderedRealmList), + Strings.collectionToCommaDelimitedString(unlicensedRealms)); + } listener.onFailure(request.authenticationFailed(authenticationToken)); } else { threadContext.putTransient(AuthenticationResult.THREAD_CONTEXT_KEY, authenticationResult); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java index 925654fae8b..39b981b42e3 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -5,24 +5,8 @@ */ package org.elasticsearch.xpack.security.authc; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; @@ -39,6 +23,21 @@ import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + /** * Serves as a realms registry (also responsible for ordering the realms appropriately) */ @@ -119,6 +118,32 @@ public class Realms implements Iterable { } } + /** + * Returns a list of realms that are configured, but are not permitted under the current license. 
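Before the method body below, the idea in isolation: asList() is already filtered by license, so the allowed realms are always a subset of the configured ones, and a size comparison is a safe shortcut before computing the set difference. A standalone sketch with plain strings standing in for realms:

import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;

final class UnlicensedFilter {
    // allowed is a license-filtered subset of configured, so equal sizes mean nothing to report.
    static List<String> unlicensed(List<String> configured, List<String> allowed) {
        if (allowed.size() == configured.size()) {
            return Collections.emptyList();
        }
        return configured.stream()
                .filter(realm -> allowed.contains(realm) == false)
                .collect(Collectors.toList());
    }
}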
+ */ + public List<Realm> getUnlicensedRealms() { + // If auth is not allowed, then everything is unlicensed + if (licenseState.isAuthAllowed() == false) { + return Collections.unmodifiableList(realms); + } + + AllowedRealmType allowedRealmType = licenseState.allowedRealmType(); + // If all realms are allowed, then nothing is unlicensed + if (allowedRealmType == AllowedRealmType.ALL) { + return Collections.emptyList(); + } + + final List<Realm> allowedRealms = this.asList(); + // Shortcut for the typical case, all the configured realms are allowed + if (allowedRealms.size() == this.realms.size()) { + return Collections.emptyList(); + } + + // Otherwise, we return anything in "all realms" that is not in the allowed realm list + List<Realm> unlicensed = realms.stream().filter(r -> allowedRealms.contains(r) == false).collect(Collectors.toList()); + return Collections.unmodifiableList(unlicensed); + } + public Stream<Realm> stream() { return StreamSupport.stream(this.spliterator(), false); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectProviderConfiguration.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectProviderConfiguration.java index 272ab283c75..f5f8c8592b2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectProviderConfiguration.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectProviderConfiguration.java @@ -15,7 +15,6 @@ import java.util.Objects; * A Class that contains all the OpenID Connect Provider configuration */ public class OpenIdConnectProviderConfiguration { - private final String providerName; private final URI authorizationEndpoint; private final URI tokenEndpoint; private final URI userinfoEndpoint; @@ -23,9 +22,8 @@ public class OpenIdConnectProviderConfiguration { private final Issuer issuer; private final String jwkSetPath; - public OpenIdConnectProviderConfiguration(String providerName, Issuer issuer, String jwkSetPath, URI authorizationEndpoint, + public OpenIdConnectProviderConfiguration(Issuer issuer, String jwkSetPath, URI authorizationEndpoint, URI tokenEndpoint, @Nullable URI userinfoEndpoint, @Nullable URI endsessionEndpoint) { - this.providerName = Objects.requireNonNull(providerName, "OP Name must be provided"); this.authorizationEndpoint = Objects.requireNonNull(authorizationEndpoint, "Authorization Endpoint must be provided"); this.tokenEndpoint = Objects.requireNonNull(tokenEndpoint, "Token Endpoint must be provided"); this.userinfoEndpoint = userinfoEndpoint; @@ -34,10 +32,6 @@ public class OpenIdConnectProviderConfiguration { this.jwkSetPath = Objects.requireNonNull(jwkSetPath, "jwkSetUrl must be provided"); } - public String getProviderName() { - return providerName; - } - public URI getAuthorizationEndpoint() { return authorizationEndpoint; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java index 72b04951a91..5f876a677d6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealm.java @@ -69,7 +69,6 @@ import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectReal import static
org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_ENDSESSION_ENDPOINT; import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_ISSUER; import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_JWKSET_PATH; -import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_NAME; import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT; import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.OP_USERINFO_ENDPOINT; import static org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings.POPULATE_USER_METADATA; @@ -267,7 +266,6 @@ public class OpenIdConnectRealm extends Realm implements Releasable { } private OpenIdConnectProviderConfiguration buildOpenIdConnectProviderConfiguration(RealmConfig config) { - String providerName = require(config, OP_NAME); Issuer issuer = new Issuer(require(config, OP_ISSUER)); String jwkSetUrl = require(config, OP_JWKSET_PATH); @@ -303,7 +301,7 @@ public class OpenIdConnectRealm extends Realm implements Releasable { throw new SettingsException("Invalid URI: " + OP_ENDSESSION_ENDPOINT.getKey(), e); } - return new OpenIdConnectProviderConfiguration(providerName, issuer, jwkSetUrl, authorizationEndpoint, tokenEndpoint, + return new OpenIdConnectProviderConfiguration(issuer, jwkSetUrl, authorizationEndpoint, tokenEndpoint, userinfoEndpoint, endsessionEndpoint); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java index 596f9c1e6e1..043af216b8f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransport.java @@ -54,7 +54,7 @@ public class SecurityNetty4HttpServerTransport extends Netty4HttpServerTransport } @Override - protected void onException(HttpChannel channel, Exception e) { + public void onException(HttpChannel channel, Exception e) { securityExceptionHandler.accept(channel, e); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java index 9372cb1ec54..de1259765b9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java @@ -36,19 +36,22 @@ public final class SSLChannelContext extends SocketChannelContext { private static final Runnable DEFAULT_TIMEOUT_CANCELLER = () -> {}; private final SSLDriver sslDriver; + private final InboundChannelBuffer networkReadBuffer; private final LinkedList encryptedFlushes = new LinkedList<>(); private Runnable closeTimeoutCanceller = DEFAULT_TIMEOUT_CANCELLER; SSLChannelContext(NioSocketChannel channel, NioSelector selector, Consumer exceptionHandler, SSLDriver sslDriver, - ReadWriteHandler readWriteHandler, InboundChannelBuffer channelBuffer) { - this(channel, selector, exceptionHandler, sslDriver, readWriteHandler, channelBuffer, 
ALWAYS_ALLOW_CHANNEL); + ReadWriteHandler readWriteHandler, InboundChannelBuffer applicationBuffer) { + this(channel, selector, exceptionHandler, sslDriver, readWriteHandler, InboundChannelBuffer.allocatingInstance(), + applicationBuffer, ALWAYS_ALLOW_CHANNEL); } SSLChannelContext(NioSocketChannel channel, NioSelector selector, Consumer exceptionHandler, SSLDriver sslDriver, - ReadWriteHandler readWriteHandler, InboundChannelBuffer channelBuffer, + ReadWriteHandler readWriteHandler, InboundChannelBuffer networkReadBuffer, InboundChannelBuffer channelBuffer, Predicate allowChannelPredicate) { super(channel, selector, exceptionHandler, readWriteHandler, channelBuffer, allowChannelPredicate); this.sslDriver = sslDriver; + this.networkReadBuffer = networkReadBuffer; } @Override @@ -157,12 +160,12 @@ public final class SSLChannelContext extends SocketChannelContext { if (closeNow()) { return bytesRead; } - bytesRead = readFromChannel(sslDriver.getNetworkReadBuffer()); + bytesRead = readFromChannel(networkReadBuffer); if (bytesRead == 0) { return bytesRead; } - sslDriver.read(channelBuffer); + sslDriver.read(networkReadBuffer, channelBuffer); handleReadBytes(); // It is possible that a read call produced non-application bytes to flush @@ -201,7 +204,7 @@ public final class SSLChannelContext extends SocketChannelContext { getSelector().executeFailedListener(encryptedFlush.getListener(), new ClosedChannelException()); } encryptedFlushes.clear(); - IOUtils.close(super::closeFromSelector, sslDriver::close); + IOUtils.close(super::closeFromSelector, networkReadBuffer::close, sslDriver::close); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java index bc112dd3a60..e54bc9fa16e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.security.transport.nio; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.Page; +import org.elasticsearch.nio.utils.ByteBufferUtils; import org.elasticsearch.nio.utils.ExceptionsHelper; import javax.net.ssl.SSLEngine; @@ -16,6 +17,7 @@ import javax.net.ssl.SSLException; import javax.net.ssl.SSLSession; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.function.IntFunction; /** * SSLDriver is a class that wraps the {@link SSLEngine} and attempts to simplify the API. The basic usage is @@ -27,9 +29,9 @@ import java.util.ArrayList; * application to be written to the wire. * * Handling reads from a channel with this class is very simple. When data has been read, call - * {@link #read(InboundChannelBuffer)}. If the data is application data, it will be decrypted and placed into - * the buffer passed as an argument. Otherwise, it will be consumed internally and advance the SSL/TLS close - * or handshake process. + * {@link #read(InboundChannelBuffer, InboundChannelBuffer)}. If the data is application data, it will be + * decrypted and placed into the application buffer passed as an argument. Otherwise, it will be consumed + * internally and advance the SSL/TLS close or handshake process. * * Producing writes for a channel is more complicated. 
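For orientation, the read path this javadoc describes boils down to a standard SSLEngine unwrap loop over two buffers: ciphertext accumulates in one, and decrypted application data lands in the other. A plain-JDK simplification (not the driver's actual implementation, which pages and recycles its buffers):

import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLEngineResult;
import javax.net.ssl.SSLException;

import java.nio.ByteBuffer;

final class UnwrapLoop {
    // Decrypts buffered ciphertext into the application buffer until the engine needs more
    // input (BUFFER_UNDERFLOW), more output space (BUFFER_OVERFLOW), a handshake step, or is closed.
    static SSLEngineResult.Status unwrapAll(SSLEngine engine, ByteBuffer encrypted, ByteBuffer application)
            throws SSLException {
        while (encrypted.hasRemaining()) {
            SSLEngineResult result = engine.unwrap(encrypted, application);
            if (result.getStatus() != SSLEngineResult.Status.OK || result.bytesConsumed() == 0) {
                return result.getStatus();
            }
        }
        return SSLEngineResult.Status.OK;
    }
}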
The method {@link #needsNonApplicationWrite()} can be * called to determine if this driver needs to produce more data to advance the handshake or close process. @@ -54,21 +56,22 @@ public class SSLDriver implements AutoCloseable { private static final FlushOperation EMPTY_FLUSH_OPERATION = new FlushOperation(EMPTY_BUFFERS, (r, t) -> {}); private final SSLEngine engine; - // TODO: When the bytes are actually recycled, we need to test that they are released on driver close - private final SSLOutboundBuffer outboundBuffer = new SSLOutboundBuffer((n) -> new Page(ByteBuffer.allocate(n))); + private final IntFunction pageAllocator; + private final SSLOutboundBuffer outboundBuffer; + private Page networkReadPage; private final boolean isClientMode; // This should only be accessed by the network thread associated with this channel, so nothing needs to // be volatile. private Mode currentMode = new HandshakeMode(); - private ByteBuffer networkReadBuffer; private int packetSize; - public SSLDriver(SSLEngine engine, boolean isClientMode) { + public SSLDriver(SSLEngine engine, IntFunction pageAllocator, boolean isClientMode) { this.engine = engine; + this.pageAllocator = pageAllocator; + this.outboundBuffer = new SSLOutboundBuffer(pageAllocator); this.isClientMode = isClientMode; SSLSession session = engine.getSession(); packetSize = session.getPacketBufferSize(); - this.networkReadBuffer = ByteBuffer.allocate(packetSize); } public void init() throws SSLException { @@ -106,22 +109,25 @@ public class SSLDriver implements AutoCloseable { return currentMode.isHandshake(); } - public ByteBuffer getNetworkReadBuffer() { - return networkReadBuffer; - } - public SSLOutboundBuffer getOutboundBuffer() { return outboundBuffer; } - public void read(InboundChannelBuffer buffer) throws SSLException { - Mode modePriorToRead; - do { - modePriorToRead = currentMode; - currentMode.read(buffer); - // If we switched modes we want to read again as there might be unhandled bytes that need to be - // handled by the new mode. - } while (modePriorToRead != currentMode); + public void read(InboundChannelBuffer encryptedBuffer, InboundChannelBuffer applicationBuffer) throws SSLException { + networkReadPage = pageAllocator.apply(packetSize); + try { + Mode modePriorToRead; + do { + modePriorToRead = currentMode; + currentMode.read(encryptedBuffer, applicationBuffer); + // It is possible that we received multiple SSL packets from the network since the last read. + // If one of those packets causes us to change modes (such as finished handshaking), we need + // to call read in the new mode to handle the remaining packets. 
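The do/while that closes immediately below is a fixed-point dispatch: keep handing the buffered input to whatever mode is current until doing so no longer changes the mode. The same shape in isolation (generic, no TLS):

import java.util.function.UnaryOperator;

final class FixedPoint {
    // Re-applies the handler until the state it returns stops changing.
    static <S> S drive(S initial, UnaryOperator<S> handler) {
        S prior;
        S current = initial;
        do {
            prior = current;
            current = handler.apply(current);
        } while (prior.equals(current) == false);
        return current;
    }
}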
+ } while (modePriorToRead != currentMode); + } finally { + networkReadPage.close(); + networkReadPage = null; + } } public boolean readyForApplicationWrites() { @@ -171,27 +177,34 @@ public class SSLDriver implements AutoCloseable { ExceptionsHelper.rethrowAndSuppress(closingExceptions); } - private SSLEngineResult unwrap(InboundChannelBuffer buffer) throws SSLException { + private SSLEngineResult unwrap(InboundChannelBuffer networkBuffer, InboundChannelBuffer applicationBuffer) throws SSLException { while (true) { - SSLEngineResult result = engine.unwrap(networkReadBuffer, buffer.sliceBuffersFrom(buffer.getIndex())); - buffer.incrementIndex(result.bytesProduced()); + ensureApplicationBufferSize(applicationBuffer); + ByteBuffer networkReadBuffer = networkReadPage.byteBuffer(); + networkReadBuffer.clear(); + ByteBufferUtils.copyBytes(networkBuffer.sliceBuffersTo(Math.min(networkBuffer.getIndex(), packetSize)), networkReadBuffer); + networkReadBuffer.flip(); + SSLEngineResult result = engine.unwrap(networkReadBuffer, applicationBuffer.sliceBuffersFrom(applicationBuffer.getIndex())); + networkBuffer.release(result.bytesConsumed()); + applicationBuffer.incrementIndex(result.bytesProduced()); switch (result.getStatus()) { case OK: - networkReadBuffer.compact(); return result; case BUFFER_UNDERFLOW: // There is not enough space in the network buffer for an entire SSL packet. Compact the // current data and expand the buffer if necessary. - int currentCapacity = networkReadBuffer.capacity(); - ensureNetworkReadBufferSize(); - if (currentCapacity == networkReadBuffer.capacity()) { - networkReadBuffer.compact(); + packetSize = engine.getSession().getPacketBufferSize(); + if (networkReadPage.byteBuffer().capacity() < packetSize) { + networkReadPage.close(); + networkReadPage = pageAllocator.apply(packetSize); + } else { + return result; } - return result; + break; case BUFFER_OVERFLOW: // There is not enough space in the application buffer for the decrypted message. Expand // the application buffer to ensure that it has enough space. - ensureApplicationBufferSize(buffer); + ensureApplicationBufferSize(applicationBuffer); break; case CLOSED: assert engine.isInboundDone() : "We received close_notify so read should be done"; @@ -254,15 +267,6 @@ public class SSLDriver implements AutoCloseable { } } - private void ensureNetworkReadBufferSize() { - packetSize = engine.getSession().getPacketBufferSize(); - if (networkReadBuffer.capacity() < packetSize) { - ByteBuffer newBuffer = ByteBuffer.allocate(packetSize); - networkReadBuffer.flip(); - newBuffer.put(networkReadBuffer); - } - } - // There are three potential modes for the driver to be in - HANDSHAKE, APPLICATION, or CLOSE. HANDSHAKE // is the initial mode. During this mode data that is read and written will be related to the TLS // handshake process. Application related data cannot be encrypted until the handshake is complete. 
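One detail worth restating from the BUFFER_UNDERFLOW branch above: the scratch page is regrown only when the engine now reports a larger TLS packet size; otherwise underflow just means more ciphertext has to arrive first. In plain ByteBuffers (note the caller must adopt the returned buffer, or the resize is lost):

import javax.net.ssl.SSLEngine;

import java.nio.ByteBuffer;

final class PacketBuffers {
    // Returns a buffer sized for the engine's current packet size, carrying over any partial
    // packet already buffered; returns the original buffer when it is already large enough.
    static ByteBuffer ensurePacketCapacity(SSLEngine engine, ByteBuffer scratch) {
        int packetSize = engine.getSession().getPacketBufferSize();
        if (scratch.capacity() >= packetSize) {
            return scratch;
        }
        ByteBuffer larger = ByteBuffer.allocate(packetSize);
        scratch.flip();
        larger.put(scratch);
        return larger;
    }
}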
From @@ -282,7 +286,7 @@ public class SSLDriver implements AutoCloseable { private interface Mode { - void read(InboundChannelBuffer buffer) throws SSLException; + void read(InboundChannelBuffer encryptedBuffer, InboundChannelBuffer applicationBuffer) throws SSLException; int write(FlushOperation applicationBytes) throws SSLException; @@ -342,13 +346,11 @@ public class SSLDriver implements AutoCloseable { } @Override - public void read(InboundChannelBuffer buffer) throws SSLException { - ensureApplicationBufferSize(buffer); + public void read(InboundChannelBuffer encryptedBuffer, InboundChannelBuffer applicationBuffer) throws SSLException { boolean continueUnwrap = true; - while (continueUnwrap && networkReadBuffer.position() > 0) { - networkReadBuffer.flip(); + while (continueUnwrap && encryptedBuffer.getIndex() > 0) { try { - SSLEngineResult result = unwrap(buffer); + SSLEngineResult result = unwrap(encryptedBuffer, applicationBuffer); handshakeStatus = result.getHandshakeStatus(); handshake(); // If we are done handshaking we should exit the handshake read @@ -430,12 +432,10 @@ public class SSLDriver implements AutoCloseable { private class ApplicationMode implements Mode { @Override - public void read(InboundChannelBuffer buffer) throws SSLException { - ensureApplicationBufferSize(buffer); + public void read(InboundChannelBuffer encryptedBuffer, InboundChannelBuffer applicationBuffer) throws SSLException { boolean continueUnwrap = true; - while (continueUnwrap && networkReadBuffer.position() > 0) { - networkReadBuffer.flip(); - SSLEngineResult result = unwrap(buffer); + while (continueUnwrap && encryptedBuffer.getIndex() > 0) { + SSLEngineResult result = unwrap(encryptedBuffer, applicationBuffer); boolean renegotiationRequested = result.getStatus() != SSLEngineResult.Status.CLOSED && maybeRenegotiation(result.getHandshakeStatus()); continueUnwrap = result.bytesProduced() > 0 && renegotiationRequested == false; @@ -515,7 +515,7 @@ public class SSLDriver implements AutoCloseable { } @Override - public void read(InboundChannelBuffer buffer) throws SSLException { + public void read(InboundChannelBuffer encryptedBuffer, InboundChannelBuffer applicationBuffer) throws SSLException { if (needToReceiveClose == false) { // There is an issue where receiving handshake messages after initiating the close process // can place the SSLEngine back into handshaking mode. 
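ApplicationMode's maybeRenegotiation call above gates on the post-unwrap handshake status; presumably (our reading, the helper's body is not in this hunk) anything other than NOT_HANDSHAKING or FINISHED means the peer started a new handshake and the driver must fall back to handshake mode:

import javax.net.ssl.SSLEngineResult.HandshakeStatus;

final class Renegotiation {
    // Post-unwrap: NOT_HANDSHAKING and FINISHED are the quiescent statuses; anything else
    // indicates the peer has initiated a renegotiation that the caller must service.
    static boolean renegotiationRequested(HandshakeStatus status) {
        return status != HandshakeStatus.NOT_HANDSHAKING && status != HandshakeStatus.FINISHED;
    }
}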
In order to handle this, if we @@ -524,11 +524,9 @@ public class SSLDriver implements AutoCloseable { return; } - ensureApplicationBufferSize(buffer); boolean continueUnwrap = true; - while (continueUnwrap && networkReadBuffer.position() > 0) { - networkReadBuffer.flip(); - SSLEngineResult result = unwrap(buffer); + while (continueUnwrap && encryptedBuffer.getIndex() > 0) { + SSLEngineResult result = unwrap(encryptedBuffer, applicationBuffer); continueUnwrap = result.bytesProduced() > 0 || result.bytesConsumed() > 0; } if (engine.isInboundDone()) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java index 8ecba16fa46..ddf465f81d9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioHttpServerTransport.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.security.transport.nio; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; @@ -22,7 +21,6 @@ import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.Page; import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.threadpool.ThreadPool; @@ -35,11 +33,9 @@ import org.elasticsearch.xpack.security.transport.filter.IPFilter; import javax.net.ssl.SSLEngine; import java.io.IOException; import java.net.InetSocketAddress; -import java.nio.ByteBuffer; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.function.Consumer; -import java.util.function.Supplier; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; @@ -93,13 +89,9 @@ public class SecurityNioHttpServerTransport extends NioHttpServerTransport { @Override public NioHttpChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { NioHttpChannel httpChannel = new NioHttpChannel(channel); - Supplier pageSupplier = () -> { - Recycler.V bytes = pageCacheRecycler.bytePage(false); - return new Page(ByteBuffer.wrap(bytes.v()), bytes::close); - }; HttpReadWriteHandler httpHandler = new HttpReadWriteHandler(httpChannel,SecurityNioHttpServerTransport.this, - handlingSettings, corsConfig); - InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + handlingSettings, corsConfig, selector.getTaskScheduler(), threadPool::relativeTimeInNanos); + InboundChannelBuffer networkBuffer = new InboundChannelBuffer(pageAllocator); Consumer exceptionHandler = (e) -> securityExceptionHandler.accept(httpChannel, e); SocketChannelContext context; @@ -113,10 +105,12 @@ public class SecurityNioHttpServerTransport extends NioHttpServerTransport { } else { sslEngine = sslService.createSSLEngine(sslConfiguration, null, -1); } - SSLDriver sslDriver = new SSLDriver(sslEngine, false); - context = new 
SSLChannelContext(httpChannel, selector, exceptionHandler, sslDriver, httpHandler, buffer, nioIpFilter); + SSLDriver sslDriver = new SSLDriver(sslEngine, pageAllocator, false); + InboundChannelBuffer applicationBuffer = new InboundChannelBuffer(pageAllocator); + context = new SSLChannelContext(httpChannel, selector, exceptionHandler, sslDriver, httpHandler, networkBuffer, + applicationBuffer, nioIpFilter); } else { - context = new BytesChannelContext(httpChannel, selector, exceptionHandler, httpHandler, buffer, nioIpFilter); + context = new BytesChannelContext(httpChannel, selector, exceptionHandler, httpHandler, networkBuffer, nioIpFilter); } httpChannel.setContext(context); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java index d3f92a2575f..cf32809333e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java @@ -12,7 +12,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; -import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.CircuitBreakerService; @@ -21,7 +20,6 @@ import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.Page; import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.threadpool.ThreadPool; @@ -45,14 +43,12 @@ import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLParameters; import java.io.IOException; import java.net.InetSocketAddress; -import java.nio.ByteBuffer; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.Collections; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; -import java.util.function.Supplier; import static org.elasticsearch.xpack.core.security.SecurityField.setting; @@ -156,20 +152,18 @@ public class SecurityNioTransport extends NioTransport { @Override public NioTcpChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { NioTcpChannel nioChannel = new NioTcpChannel(isClient == false, profileName, channel); - Supplier pageSupplier = () -> { - Recycler.V bytes = pageCacheRecycler.bytePage(false); - return new Page(ByteBuffer.wrap(bytes.v()), bytes::close); - }; TcpReadWriteHandler readWriteHandler = new TcpReadWriteHandler(nioChannel, SecurityNioTransport.this); - InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + InboundChannelBuffer networkBuffer = new InboundChannelBuffer(pageAllocator); Consumer exceptionHandler = (e) -> onException(nioChannel, e); SocketChannelContext context; if (sslEnabled) { - SSLDriver sslDriver = new SSLDriver(createSSLEngine(channel), isClient); - context = new SSLChannelContext(nioChannel, selector, exceptionHandler, sslDriver, readWriteHandler, buffer, ipFilter); + SSLDriver 
sslDriver = new SSLDriver(createSSLEngine(channel), pageAllocator, isClient); + InboundChannelBuffer applicationBuffer = new InboundChannelBuffer(pageAllocator); + context = new SSLChannelContext(nioChannel, selector, exceptionHandler, sslDriver, readWriteHandler, networkBuffer, + applicationBuffer, ipFilter); } else { - context = new BytesChannelContext(nioChannel, selector, exceptionHandler, readWriteHandler, buffer, ipFilter); + context = new BytesChannelContext(nioChannel, selector, exceptionHandler, readWriteHandler, networkBuffer, ipFilter); } nioChannel.setContext(context); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java index c37d6913d1f..a26e05e5234 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings; import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings; import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; @@ -39,10 +40,13 @@ import java.util.TreeMap; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.notNullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -104,6 +108,8 @@ public class RealmsTests extends ESTestCase { assertThat(realm.name(), equalTo("realm_" + index)); i++; } + + assertThat(realms.getUnlicensedRealms(), empty()); } public void testWithSettingsWhereDifferentRealmsHaveSameOrder() throws Exception { @@ -142,6 +148,8 @@ public class RealmsTests extends ESTestCase { assertThat(realm.type(), equalTo("type_" + nameToRealmId.get(expectedRealmName))); assertThat(realm.name(), equalTo(expectedRealmName)); } + + assertThat(realms.getUnlicensedRealms(), empty()); } public void testWithSettingsWithMultipleInternalRealmsOfSameType() throws Exception { @@ -175,6 +183,8 @@ public class RealmsTests extends ESTestCase { assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + + assertThat(realms.getUnlicensedRealms(), empty()); } public void testUnlicensedWithOnlyCustomRealms() throws Exception { @@ -209,6 +219,8 @@ public class RealmsTests extends ESTestCase { i++; } + assertThat(realms.getUnlicensedRealms(), empty()); + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); iter = realms.iterator(); @@ -225,6 +237,18 @@ public class RealmsTests extends ESTestCase { assertThat(realm.name(), 
equalTo("default_" + NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + assertThat(realms.getUnlicensedRealms(), iterableWithSize(randomRealmTypesCount)); + iter = realms.getUnlicensedRealms().iterator(); + i = 0; + while (iter.hasNext()) { + realm = iter.next(); + assertThat(realm.order(), equalTo(i)); + int index = orderToIndex.get(i); + assertThat(realm.type(), equalTo("type_" + index)); + assertThat(realm.name(), equalTo("realm_" + index)); + i++; + } + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); iter = realms.iterator(); @@ -240,6 +264,18 @@ public class RealmsTests extends ESTestCase { assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + + assertThat(realms.getUnlicensedRealms(), iterableWithSize(randomRealmTypesCount)); + iter = realms.getUnlicensedRealms().iterator(); + i = 0; + while (iter.hasNext()) { + realm = iter.next(); + assertThat(realm.order(), equalTo(i)); + int index = orderToIndex.get(i); + assertThat(realm.type(), equalTo("type_" + index)); + assertThat(realm.name(), equalTo("realm_" + index)); + i++; + } } public void testUnlicensedWithInternalRealms() throws Exception { @@ -266,6 +302,7 @@ public class RealmsTests extends ESTestCase { types.add(realm.type()); } assertThat(types, contains("ldap", "type_0")); + assertThat(realms.getUnlicensedRealms(), empty()); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); iter = realms.iterator(); @@ -280,6 +317,11 @@ public class RealmsTests extends ESTestCase { } assertThat(i, is(1)); + assertThat(realms.getUnlicensedRealms(), iterableWithSize(1)); + realm = realms.getUnlicensedRealms().get(0); + assertThat(realm.type(), equalTo("type_0")); + assertThat(realm.name(), equalTo("custom")); + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); iter = realms.iterator(); assertThat(iter.hasNext(), is(true)); @@ -294,6 +336,14 @@ public class RealmsTests extends ESTestCase { assertThat(realm.type(), equalTo(NativeRealmSettings.TYPE)); assertThat(realm.name(), equalTo("default_" + NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + + assertThat(realms.getUnlicensedRealms(), iterableWithSize(2)); + realm = realms.getUnlicensedRealms().get(0); + assertThat(realm.type(), equalTo("ldap")); + assertThat(realm.name(), equalTo("foo")); + realm = realms.getUnlicensedRealms().get(1); + assertThat(realm.type(), equalTo("type_0")); + assertThat(realm.name(), equalTo("custom")); } public void testUnlicensedWithNativeRealmSettings() throws Exception { @@ -317,6 +367,7 @@ public class RealmsTests extends ESTestCase { realm = iter.next(); assertThat(realm.type(), is(type)); assertThat(iter.hasNext(), is(false)); + assertThat(realms.getUnlicensedRealms(), empty()); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); iter = realms.iterator(); @@ -327,10 +378,15 @@ public class RealmsTests extends ESTestCase { realm = iter.next(); assertThat(realm.type(), is(type)); assertThat(iter.hasNext(), is(false)); + + assertThat(realms.getUnlicensedRealms(), iterableWithSize(1)); + realm = realms.getUnlicensedRealms().get(0); + assertThat(realm.type(), equalTo("ldap")); + assertThat(realm.name(), equalTo("foo")); } public void testUnlicensedWithNonStandardRealms() throws Exception { - final String selectedRealmType = randomFrom(SamlRealmSettings.TYPE, KerberosRealmSettings.TYPE); + final String 
selectedRealmType = randomFrom(SamlRealmSettings.TYPE, KerberosRealmSettings.TYPE, OpenIdConnectRealmSettings.TYPE); factories.put(selectedRealmType, config -> new DummyRealm(selectedRealmType, config)); Settings.Builder builder = Settings.builder() .put("path.home", createTempDir()) @@ -346,6 +402,7 @@ public class RealmsTests extends ESTestCase { realm = iter.next(); assertThat(realm.type(), is(selectedRealmType)); assertThat(iter.hasNext(), is(false)); + assertThat(realms.getUnlicensedRealms(), empty()); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.DEFAULT); iter = realms.iterator(); @@ -360,6 +417,11 @@ public class RealmsTests extends ESTestCase { assertThat(realm.type(), is(NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + assertThat(realms.getUnlicensedRealms(), iterableWithSize(1)); + realm = realms.getUnlicensedRealms().get(0); + assertThat(realm.type(), equalTo(selectedRealmType)); + assertThat(realm.name(), equalTo("foo")); + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); iter = realms.iterator(); assertThat(iter.hasNext(), is(true)); @@ -372,6 +434,11 @@ public class RealmsTests extends ESTestCase { realm = iter.next(); assertThat(realm.type(), is(NativeRealmSettings.TYPE)); assertThat(iter.hasNext(), is(false)); + + assertThat(realms.getUnlicensedRealms(), iterableWithSize(1)); + realm = realms.getUnlicensedRealms().get(0); + assertThat(realm.type(), equalTo(selectedRealmType)); + assertThat(realm.name(), equalTo("foo")); } public void testDisabledRealmsAreNotAdded() throws Exception { @@ -422,6 +489,11 @@ public class RealmsTests extends ESTestCase { } assertThat(count, equalTo(orderToIndex.size())); + assertThat(realms.getUnlicensedRealms(), empty()); + + // check that disabled realms are not included in unlicensed realms + when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NATIVE); + assertThat(realms.getUnlicensedRealms(), hasSize(orderToIndex.size())); } public void testAuthcAuthzDisabled() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java index 3bc89d29f8d..bccee36631e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java @@ -72,7 +72,6 @@ public class SecurityRealmSettingsTests extends SecurityIntegTestCase { .put("xpack.security.authc.realms.kerberos.kerb1.order", 7) .put("xpack.security.authc.realms.kerberos.kerb1.keytab.path", kerbKeyTab.toAbsolutePath()) .put("xpack.security.authc.realms.oidc.oidc1.order", 8) - .put("xpack.security.authc.realms.oidc.oidc1.op.name", "myprovider") .put("xpack.security.authc.realms.oidc.oidc1.op.issuer", "https://the.issuer.com:8090") .put("xpack.security.authc.realms.oidc.oidc1.op.jwkset_path", jwkSet.toAbsolutePath()) .put("xpack.security.authc.realms.oidc.oidc1.op.authorization_endpoint", "https://the.issuer.com:8090/login") diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java index e7fdbfe558a..64e976d90d1 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticatorTests.java @@ -633,7 +633,7 @@ public class OpenIdConnectAuthenticatorTests extends OpenIdConnectTestCase { } private OpenIdConnectProviderConfiguration getOpConfig() throws URISyntaxException { - return new OpenIdConnectProviderConfiguration("op_name", + return new OpenIdConnectProviderConfiguration( new Issuer("https://op.example.com"), "https://op.example.org/jwks.json", new URI("https://op.example.org/login"), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java index cd92168b3aa..8dbf27070c4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmSettingsTests.java @@ -37,7 +37,6 @@ public class OpenIdConnectRealmSettingsTests extends ESTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") @@ -53,7 +52,6 @@ public class OpenIdConnectRealmSettingsTests extends ESTestCase { public void testMissingAuthorizationEndpointThrowsError() { final Settings.Builder settingsBuilder = Settings.builder() .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") @@ -73,7 +71,6 @@ public class OpenIdConnectRealmSettingsTests extends ESTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") @@ -90,7 +87,6 @@ public class 
OpenIdConnectRealmSettingsTests extends ESTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") @@ -108,7 +104,6 @@ public class OpenIdConnectRealmSettingsTests extends ESTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "This is not a uri") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") @@ -124,7 +119,6 @@ public class OpenIdConnectRealmSettingsTests extends ESTestCase { final Settings.Builder settingsBuilder = Settings.builder() .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") @@ -140,7 +134,6 @@ public class OpenIdConnectRealmSettingsTests extends ESTestCase { final Settings.Builder settingsBuilder = Settings.builder() .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") @@ -153,30 +146,12 @@ public class OpenIdConnectRealmSettingsTests extends ESTestCase { Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER))); } - public void testMissingNameTypeThrowsError() { - final Settings.Builder settingsBuilder = Settings.builder() - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), 
"https://op.example.com/token") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); - SettingsException exception = expectThrows(SettingsException.class, () -> { - new OpenIdConnectRealm(buildConfig(settingsBuilder.build()), null, null); - }); - assertThat(exception.getMessage(), - Matchers.containsString(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME))); - } - public void testMissingRedirectUriThrowsError() { final Settings.Builder settingsBuilder = Settings.builder() .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_RESPONSE_TYPE), "code"); @@ -192,7 +167,6 @@ public class OpenIdConnectRealmSettingsTests extends ESTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com") @@ -209,7 +183,6 @@ public class OpenIdConnectRealmSettingsTests extends ESTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_CLIENT_ID), "rp-my") @@ -228,7 +201,6 @@ public class OpenIdConnectRealmSettingsTests extends 
ESTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.NAME_CLAIM.getPattern()), "^(.*)$") diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java index 0d26c0b442c..151a7e1caea 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectRealmTests.java @@ -159,7 +159,6 @@ public class OpenIdConnectRealmTests extends OpenIdConnectTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") @@ -182,7 +181,6 @@ public class OpenIdConnectRealmTests extends OpenIdConnectTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") @@ -205,7 +203,6 @@ public class OpenIdConnectRealmTests extends OpenIdConnectTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") 
.put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") @@ -236,7 +233,6 @@ public class OpenIdConnectRealmTests extends OpenIdConnectTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") @@ -257,7 +253,6 @@ public class OpenIdConnectRealmTests extends OpenIdConnectTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_AUTHORIZATION_ENDPOINT), "https://op.example.com/login") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.com/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.com/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.my.com/cb") diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java index df5acb0c3a7..9c1c4e98110 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectTestCase.java @@ -41,7 +41,6 @@ public abstract class OpenIdConnectTestCase extends ESTestCase { .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_TOKEN_ENDPOINT), "https://op.example.org/token") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ENDSESSION_ENDPOINT), "https://op.example.org/logout") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_ISSUER), "https://op.example.com") - .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_NAME), "the op") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.OP_JWKSET_PATH), "https://op.example.org/jwks.json") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.PRINCIPAL_CLAIM.getClaim()), "sub") .put(getFullSettingKey(REALM_NAME, OpenIdConnectRealmSettings.RP_REDIRECT_URI), "https://rp.elastic.co/cb") diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java index dcccb23f1f6..6a380a8fab2 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java @@ -52,7 +52,6 @@ public class SSLChannelContextTests extends ESTestCase { private BiConsumer<Void, Exception> listener; private Consumer<Exception> exceptionHandler; private SSLDriver sslDriver; - private ByteBuffer readBuffer = ByteBuffer.allocate(1 << 14); private int messageLength; @Before @@ -76,7 +75,6 @@ public class SSLChannelContextTests extends ESTestCase { when(selector.isOnCurrentThread()).thenReturn(true); when(selector.getTaskScheduler()).thenReturn(nioTimer); - when(sslDriver.getNetworkReadBuffer()).thenReturn(readBuffer); when(sslDriver.getOutboundBuffer()).thenReturn(outboundBuffer); ByteBuffer buffer = ByteBuffer.allocate(1 << 14); when(selector.getIoBuffer()).thenAnswer(invocationOnMock -> { @@ -88,8 +86,12 @@ public class SSLChannelContextTests extends ESTestCase { public void testSuccessfulRead() throws IOException { byte[] bytes = createMessage(messageLength); - when(rawChannel.read(any(ByteBuffer.class))).thenReturn(bytes.length); - doAnswer(getReadAnswerForBytes(bytes)).when(sslDriver).read(channelBuffer); + when(rawChannel.read(any(ByteBuffer.class))).thenAnswer(invocationOnMock -> { + ByteBuffer buffer = (ByteBuffer) invocationOnMock.getArguments()[0]; + buffer.put(bytes); + return bytes.length; + }); + doAnswer(getReadAnswerForBytes(bytes)).when(sslDriver).read(any(InboundChannelBuffer.class), eq(channelBuffer)); when(readConsumer.apply(channelBuffer)).thenReturn(messageLength, 0); @@ -103,8 +105,12 @@ public class SSLChannelContextTests extends ESTestCase { public void testMultipleReadsConsumed() throws IOException { byte[] bytes = createMessage(messageLength * 2); - when(rawChannel.read(any(ByteBuffer.class))).thenReturn(bytes.length); - doAnswer(getReadAnswerForBytes(bytes)).when(sslDriver).read(channelBuffer); + when(rawChannel.read(any(ByteBuffer.class))).thenAnswer(invocationOnMock -> { + ByteBuffer buffer = (ByteBuffer) invocationOnMock.getArguments()[0]; + buffer.put(bytes); + return bytes.length; + }); + doAnswer(getReadAnswerForBytes(bytes)).when(sslDriver).read(any(InboundChannelBuffer.class), eq(channelBuffer)); when(readConsumer.apply(channelBuffer)).thenReturn(messageLength, messageLength, 0); @@ -118,8 +124,12 @@ public class SSLChannelContextTests extends ESTestCase { public void testPartialRead() throws IOException { byte[] bytes = createMessage(messageLength); - when(rawChannel.read(any(ByteBuffer.class))).thenReturn(bytes.length); - doAnswer(getReadAnswerForBytes(bytes)).when(sslDriver).read(channelBuffer); + when(rawChannel.read(any(ByteBuffer.class))).thenAnswer(invocationOnMock -> { + ByteBuffer buffer = (ByteBuffer) invocationOnMock.getArguments()[0]; + buffer.put(bytes); + return bytes.length; + }); + doAnswer(getReadAnswerForBytes(bytes)).when(sslDriver).read(any(InboundChannelBuffer.class), eq(channelBuffer)); when(readConsumer.apply(channelBuffer)).thenReturn(0); @@ -424,12 +434,12 @@ public class SSLChannelContextTests extends ESTestCase { private Answer<Integer> getReadAnswerForBytes(byte[] bytes) { return invocationOnMock -> { - InboundChannelBuffer buffer = (InboundChannelBuffer) invocationOnMock.getArguments()[0]; - buffer.ensureCapacity(buffer.getIndex() + bytes.length); - ByteBuffer[] buffers = buffer.sliceBuffersFrom(buffer.getIndex()); + InboundChannelBuffer appBuffer = (InboundChannelBuffer) invocationOnMock.getArguments()[1]; +
appBuffer.ensureCapacity(appBuffer.getIndex() + bytes.length); + ByteBuffer[] buffers = appBuffer.sliceBuffersFrom(appBuffer.getIndex()); assert buffers[0].remaining() > bytes.length; buffers[0].put(bytes); - buffer.incrementIndex(bytes.length); + appBuffer.incrementIndex(bytes.length); return bytes.length; }; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java index 376c4e1e99a..fba6db47c1b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java @@ -26,14 +26,16 @@ import java.security.SecureRandom; import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.function.Supplier; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.IntFunction; public class SSLDriverTests extends ESTestCase { - private final Supplier<Page> pageSupplier = () -> new Page(ByteBuffer.allocate(1 << 14), () -> {}); - private InboundChannelBuffer serverBuffer = new InboundChannelBuffer(pageSupplier); - private InboundChannelBuffer clientBuffer = new InboundChannelBuffer(pageSupplier); - private InboundChannelBuffer genericBuffer = new InboundChannelBuffer(pageSupplier); + private final IntFunction<Page> pageAllocator = (n) -> new Page(ByteBuffer.allocate(n), () -> {}); + + private final InboundChannelBuffer networkReadBuffer = new InboundChannelBuffer(pageAllocator); + private final InboundChannelBuffer applicationBuffer = new InboundChannelBuffer(pageAllocator); + private final AtomicInteger openPages = new AtomicInteger(0); public void testPingPongAndClose() throws Exception { SSLContext sslContext = getSSLContext(); @@ -44,19 +46,36 @@ public class SSLDriverTests extends ESTestCase { handshake(clientDriver, serverDriver); ByteBuffer[] buffers = {ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8))}; - sendAppData(clientDriver, serverDriver, buffers); - serverDriver.read(serverBuffer); - assertEquals(ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8)), serverBuffer.sliceBuffersTo(4)[0]); + sendAppData(clientDriver, buffers); + serverDriver.read(networkReadBuffer, applicationBuffer); + assertEquals(ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8)), applicationBuffer.sliceBuffersTo(4)[0]); + applicationBuffer.release(4); ByteBuffer[] buffers2 = {ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8))}; - sendAppData(serverDriver, clientDriver, buffers2); - clientDriver.read(clientBuffer); - assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), clientBuffer.sliceBuffersTo(4)[0]); + sendAppData(serverDriver, buffers2); + clientDriver.read(networkReadBuffer, applicationBuffer); + assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), applicationBuffer.sliceBuffersTo(4)[0]); + applicationBuffer.release(4); assertFalse(clientDriver.needsNonApplicationWrite()); normalClose(clientDriver, serverDriver); } + public void testDataStoredInOutboundBufferIsClosed() throws Exception { + SSLContext sslContext = getSSLContext(); + + SSLDriver clientDriver = getDriver(sslContext.createSSLEngine(), true); + SSLDriver serverDriver = getDriver(sslContext.createSSLEngine(), false); + + handshake(clientDriver, serverDriver); + + ByteBuffer[] buffers =
{ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8))}; + serverDriver.write(new FlushOperation(buffers, (v, e) -> {})); + + expectThrows(SSLException.class, serverDriver::close); + assertEquals(0, openPages.get()); + } + public void testRenegotiate() throws Exception { SSLContext sslContext = getSSLContext(); @@ -73,9 +92,10 @@ public class SSLDriverTests extends ESTestCase { handshake(clientDriver, serverDriver); ByteBuffer[] buffers = {ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8))}; - sendAppData(clientDriver, serverDriver, buffers); - serverDriver.read(serverBuffer); - assertEquals(ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8)), serverBuffer.sliceBuffersTo(4)[0]); + sendAppData(clientDriver, buffers); + serverDriver.read(networkReadBuffer, applicationBuffer); + assertEquals(ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8)), applicationBuffer.sliceBuffersTo(4)[0]); + applicationBuffer.release(4); clientDriver.renegotiate(); assertTrue(clientDriver.isHandshaking()); @@ -83,17 +103,20 @@ public class SSLDriverTests extends ESTestCase { // This tests that the client driver can still receive data based on the prior handshake ByteBuffer[] buffers2 = {ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8))}; - sendAppData(serverDriver, clientDriver, buffers2); - clientDriver.read(clientBuffer); - assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), clientBuffer.sliceBuffersTo(4)[0]); + sendAppData(serverDriver, buffers2); + clientDriver.read(networkReadBuffer, applicationBuffer); + assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), applicationBuffer.sliceBuffersTo(4)[0]); + applicationBuffer.release(4); handshake(clientDriver, serverDriver, true); - sendAppData(clientDriver, serverDriver, buffers); - serverDriver.read(serverBuffer); - assertEquals(ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8)), serverBuffer.sliceBuffersTo(4)[0]); - sendAppData(serverDriver, clientDriver, buffers2); - clientDriver.read(clientBuffer); - assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), clientBuffer.sliceBuffersTo(4)[0]); + sendAppData(clientDriver, buffers); + serverDriver.read(networkReadBuffer, applicationBuffer); + assertEquals(ByteBuffer.wrap("ping".getBytes(StandardCharsets.UTF_8)), applicationBuffer.sliceBuffersTo(4)[0]); + applicationBuffer.release(4); + sendAppData(serverDriver, buffers2); + clientDriver.read(networkReadBuffer, applicationBuffer); + assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), applicationBuffer.sliceBuffersTo(4)[0]); + applicationBuffer.release(4); normalClose(clientDriver, serverDriver); } @@ -108,18 +131,22 @@ public class SSLDriverTests extends ESTestCase { ByteBuffer buffer = ByteBuffer.allocate(1 << 15); for (int i = 0; i < (1 << 15); ++i) { - buffer.put((byte) i); + buffer.put((byte) (i % 127)); } + buffer.flip(); ByteBuffer[] buffers = {buffer}; - sendAppData(clientDriver, serverDriver, buffers); - serverDriver.read(serverBuffer); - assertEquals(16384, serverBuffer.sliceBuffersFrom(0)[0].limit()); - assertEquals(16384, serverBuffer.sliceBuffersFrom(0)[1].limit()); + sendAppData(clientDriver, buffers); + serverDriver.read(networkReadBuffer, applicationBuffer); + ByteBuffer[] buffers1 = applicationBuffer.sliceBuffersFrom(0); + assertEquals((byte) (16383 % 127), buffers1[0].get(16383)); + assertEquals((byte) (32767 % 127), buffers1[1].get(16383)); + applicationBuffer.release(1 << 15); ByteBuffer[] buffers2 = 
{ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8))}; - sendAppData(serverDriver, clientDriver, buffers2); - clientDriver.read(clientBuffer); - assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), clientBuffer.sliceBuffersTo(4)[0]); + sendAppData(serverDriver, buffers2); + clientDriver.read(networkReadBuffer, applicationBuffer); + assertEquals(ByteBuffer.wrap("pong".getBytes(StandardCharsets.UTF_8)), applicationBuffer.sliceBuffersTo(4)[0]); + applicationBuffer.release(4); assertFalse(clientDriver.needsNonApplicationWrite()); normalClose(clientDriver, serverDriver); @@ -193,16 +220,16 @@ public class SSLDriverTests extends ESTestCase { serverDriver.initiateClose(); assertTrue(serverDriver.needsNonApplicationWrite()); assertFalse(serverDriver.isClosed()); - sendNonApplicationWrites(serverDriver, clientDriver); + sendNonApplicationWrites(serverDriver); // We are immediately fully closed due to SSLEngine inconsistency assertTrue(serverDriver.isClosed()); - // This should not throw exception yet as the SSLEngine will not UNWRAP data while attempting to WRAP - clientDriver.read(clientBuffer); - sendNonApplicationWrites(clientDriver, serverDriver); - clientDriver.read(clientBuffer); - sendNonApplicationWrites(clientDriver, serverDriver); - serverDriver.read(serverBuffer); + + SSLException sslException = expectThrows(SSLException.class, () -> clientDriver.read(networkReadBuffer, applicationBuffer)); + assertEquals("Received close_notify during handshake", sslException.getMessage()); + sendNonApplicationWrites(clientDriver); assertTrue(clientDriver.isClosed()); + + serverDriver.read(networkReadBuffer, applicationBuffer); } public void testCloseDuringHandshakePreJDK11() throws Exception { @@ -226,26 +253,28 @@ public class SSLDriverTests extends ESTestCase { serverDriver.initiateClose(); assertTrue(serverDriver.needsNonApplicationWrite()); assertFalse(serverDriver.isClosed()); - sendNonApplicationWrites(serverDriver, clientDriver); + sendNonApplicationWrites(serverDriver); // We are immediately fully closed due to SSLEngine inconsistency assertTrue(serverDriver.isClosed()); - SSLException sslException = expectThrows(SSLException.class, () -> clientDriver.read(clientBuffer)); + // This should not throw exception yet as the SSLEngine will not UNWRAP data while attempting to WRAP + + SSLException sslException = expectThrows(SSLException.class, () -> clientDriver.read(networkReadBuffer, applicationBuffer)); assertEquals("Received close_notify during handshake", sslException.getMessage()); - assertTrue(clientDriver.needsNonApplicationWrite()); - sendNonApplicationWrites(clientDriver, serverDriver); - serverDriver.read(serverBuffer); + sendNonApplicationWrites(clientDriver); assertTrue(clientDriver.isClosed()); + + serverDriver.read(networkReadBuffer, applicationBuffer); } private void failedCloseAlert(SSLDriver sendDriver, SSLDriver receiveDriver, List messages) throws SSLException { assertTrue(sendDriver.needsNonApplicationWrite()); assertFalse(sendDriver.isClosed()); - sendNonApplicationWrites(sendDriver, receiveDriver); + sendNonApplicationWrites(sendDriver); assertTrue(sendDriver.isClosed()); sendDriver.close(); - SSLException sslException = expectThrows(SSLException.class, () -> receiveDriver.read(genericBuffer)); + SSLException sslException = expectThrows(SSLException.class, () -> receiveDriver.read(networkReadBuffer, applicationBuffer)); assertTrue("Expected one of the following exception messages: " + messages + ". 
Found: " + sslException.getMessage(), messages.stream().anyMatch(m -> sslException.getMessage().equals(m))); if (receiveDriver.needsNonApplicationWrite() == false) { @@ -274,29 +303,30 @@ public class SSLDriverTests extends ESTestCase { sendDriver.initiateClose(); assertFalse(sendDriver.readyForApplicationWrites()); assertTrue(sendDriver.needsNonApplicationWrite()); - sendNonApplicationWrites(sendDriver, receiveDriver); + sendNonApplicationWrites(sendDriver); assertFalse(sendDriver.isClosed()); - receiveDriver.read(genericBuffer); + receiveDriver.read(networkReadBuffer, applicationBuffer); assertFalse(receiveDriver.isClosed()); assertFalse(receiveDriver.readyForApplicationWrites()); assertTrue(receiveDriver.needsNonApplicationWrite()); - sendNonApplicationWrites(receiveDriver, sendDriver); + sendNonApplicationWrites(receiveDriver); assertTrue(receiveDriver.isClosed()); - sendDriver.read(genericBuffer); + sendDriver.read(networkReadBuffer, applicationBuffer); assertTrue(sendDriver.isClosed()); sendDriver.close(); receiveDriver.close(); + assertEquals(0, openPages.get()); } - private void sendNonApplicationWrites(SSLDriver sendDriver, SSLDriver receiveDriver) throws SSLException { + private void sendNonApplicationWrites(SSLDriver sendDriver) throws SSLException { SSLOutboundBuffer outboundBuffer = sendDriver.getOutboundBuffer(); while (sendDriver.needsNonApplicationWrite() || outboundBuffer.hasEncryptedBytesToFlush()) { if (outboundBuffer.hasEncryptedBytesToFlush()) { - sendData(outboundBuffer.buildNetworkFlushOperation(), receiveDriver); + sendData(outboundBuffer.buildNetworkFlushOperation()); } else { sendDriver.nonApplicationWrite(); } @@ -342,8 +372,8 @@ public class SSLDriverTests extends ESTestCase { while (sendDriver.needsNonApplicationWrite() || outboundBuffer.hasEncryptedBytesToFlush()) { if (outboundBuffer.hasEncryptedBytesToFlush()) { - sendData(outboundBuffer.buildNetworkFlushOperation(), receiveDriver); - receiveDriver.read(genericBuffer); + sendData(outboundBuffer.buildNetworkFlushOperation()); + receiveDriver.read(networkReadBuffer, applicationBuffer); } else { sendDriver.nonApplicationWrite(); } @@ -353,37 +383,46 @@ public class SSLDriverTests extends ESTestCase { } } - private void sendAppData(SSLDriver sendDriver, SSLDriver receiveDriver, ByteBuffer[] message) throws IOException { + private void sendAppData(SSLDriver sendDriver, ByteBuffer[] message) throws IOException { assertFalse(sendDriver.needsNonApplicationWrite()); - int bytesToEncrypt = Arrays.stream(message).mapToInt(Buffer::remaining).sum(); - SSLOutboundBuffer outboundBuffer = sendDriver.getOutboundBuffer(); FlushOperation flushOperation = new FlushOperation(message, (r, l) -> {}); - int bytesEncrypted = 0; - while (bytesToEncrypt > bytesEncrypted) { - bytesEncrypted += sendDriver.write(flushOperation); - sendData(outboundBuffer.buildNetworkFlushOperation(), receiveDriver); + while (flushOperation.isFullyFlushed() == false) { + sendDriver.write(flushOperation); } + sendData(sendDriver.getOutboundBuffer().buildNetworkFlushOperation()); } - private void sendData(FlushOperation flushOperation, SSLDriver receiveDriver) { - ByteBuffer readBuffer = receiveDriver.getNetworkReadBuffer(); + private void sendData(FlushOperation flushOperation) { ByteBuffer[] writeBuffers = flushOperation.getBuffersToWrite(); - int bytesToEncrypt = Arrays.stream(writeBuffers).mapToInt(Buffer::remaining).sum(); - assert bytesToEncrypt < readBuffer.capacity() : "Flush operation must be less that read buffer"; + int bytesToCopy = 
Arrays.stream(writeBuffers).mapToInt(Buffer::remaining).sum(); + networkReadBuffer.ensureCapacity(bytesToCopy + networkReadBuffer.getIndex()); + ByteBuffer[] byteBuffers = networkReadBuffer.sliceBuffersFrom(0); assert writeBuffers.length > 0 : "No write buffers"; - for (ByteBuffer writeBuffer : writeBuffers) { - int written = writeBuffer.remaining(); + int r = 0; + while (flushOperation.isFullyFlushed() == false) { + ByteBuffer readBuffer = byteBuffers[r]; + ByteBuffer writeBuffer = flushOperation.getBuffersToWrite()[0]; + int toWrite = Math.min(writeBuffer.remaining(), readBuffer.remaining()); + writeBuffer.limit(writeBuffer.position() + toWrite); readBuffer.put(writeBuffer); - flushOperation.incrementIndex(written); + flushOperation.incrementIndex(toWrite); + if (readBuffer.remaining() == 0) { + r++; + } } + networkReadBuffer.incrementIndex(bytesToCopy); assertTrue(flushOperation.isFullyFlushed()); + flushOperation.getListener().accept(null, null); } private SSLDriver getDriver(SSLEngine engine, boolean isClient) { - return new SSLDriver(engine, isClient); + return new SSLDriver(engine, (n) -> { + openPages.incrementAndGet(); + return new Page(ByteBuffer.allocate(n), openPages::decrementAndGet); + }, isClient); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java index 9920293794e..6166d87703e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.parser; -import com.carrotsearch.hppc.ObjectShortHashMap; import org.antlr.v4.runtime.BaseErrorListener; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.CommonToken; @@ -26,16 +25,6 @@ import org.antlr.v4.runtime.tree.TerminalNode; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.xpack.sql.expression.Expression; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.BackQuotedIdentifierContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.BooleanDefaultContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.BooleanExpressionContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.PrimaryExpressionContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryPrimaryDefaultContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryTermContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QuoteIdentifierContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StatementContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StatementDefaultContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.UnquoteIdentifierContext; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; @@ -50,7 +39,6 @@ import java.util.function.BiFunction; import java.util.function.Function; import static java.lang.String.format; -import static org.elasticsearch.xpack.sql.parser.AbstractBuilder.source; public class SqlParser { @@ -100,45 +88,49 @@ public class SqlParser { List params, Function parseFunction, BiFunction visitor) { - SqlBaseLexer lexer = new SqlBaseLexer(new CaseInsensitiveStream(sql)); + try { + SqlBaseLexer lexer = new SqlBaseLexer(new CaseInsensitiveStream(sql)); - lexer.removeErrorListeners(); - 
lexer.addErrorListener(ERROR_LISTENER); + lexer.removeErrorListeners(); + lexer.addErrorListener(ERROR_LISTENER); - Map<Token, SqlTypedParamValue> paramTokens = new HashMap<>(); - TokenSource tokenSource = new ParametrizedTokenSource(lexer, paramTokens, params); + Map<Token, SqlTypedParamValue> paramTokens = new HashMap<>(); + TokenSource tokenSource = new ParametrizedTokenSource(lexer, paramTokens, params); - CommonTokenStream tokenStream = new CommonTokenStream(tokenSource); - SqlBaseParser parser = new SqlBaseParser(tokenStream); + CommonTokenStream tokenStream = new CommonTokenStream(tokenSource); + SqlBaseParser parser = new SqlBaseParser(tokenStream); - parser.addParseListener(new CircuitBreakerListener()); - parser.addParseListener(new PostProcessor(Arrays.asList(parser.getRuleNames()))); + parser.addParseListener(new PostProcessor(Arrays.asList(parser.getRuleNames()))); - parser.removeErrorListeners(); - parser.addErrorListener(ERROR_LISTENER); + parser.removeErrorListeners(); + parser.addErrorListener(ERROR_LISTENER); - parser.getInterpreter().setPredictionMode(PredictionMode.SLL); + parser.getInterpreter().setPredictionMode(PredictionMode.SLL); - if (DEBUG) { - debug(parser); - tokenStream.fill(); + if (DEBUG) { + debug(parser); + tokenStream.fill(); - for (Token t : tokenStream.getTokens()) { - String symbolicName = SqlBaseLexer.VOCABULARY.getSymbolicName(t.getType()); - String literalName = SqlBaseLexer.VOCABULARY.getLiteralName(t.getType()); - log.info(format(Locale.ROOT, " %-15s '%s'", + for (Token t : tokenStream.getTokens()) { + String symbolicName = SqlBaseLexer.VOCABULARY.getSymbolicName(t.getType()); + String literalName = SqlBaseLexer.VOCABULARY.getLiteralName(t.getType()); + log.info(format(Locale.ROOT, " %-15s '%s'", symbolicName == null ? literalName : symbolicName, t.getText())); + } } + + ParserRuleContext tree = parseFunction.apply(parser); + + if (DEBUG) { + log.info("Parse tree {} " + tree.toStringTree()); + } + + return visitor.apply(new AstBuilder(paramTokens), tree); + } catch (StackOverflowError e) { + throw new ParsingException("SQL statement is too large, " + + "causing stack overflow when generating the parsing tree: [{}]", sql); } - - ParserRuleContext tree = parseFunction.apply(parser); - - if (DEBUG) { - log.info("Parse tree {} " + tree.toStringTree()); - } - - return visitor.apply(new AstBuilder(paramTokens), tree); } private static void debug(SqlBaseParser parser) { @@ -221,93 +213,6 @@ public class SqlParser { } } - /** - * Used to catch large expressions that can lead to stack overflows - */ - static class CircuitBreakerListener extends SqlBaseBaseListener { - - private static final short MAX_RULE_DEPTH = 200; - - /** - * Due to the structure of the grammar and our custom handling in {@link ExpressionBuilder} - * some expressions can exit with a different class than they entered: - * e.g.: ValueExpressionContext can exit as ValueExpressionDefaultContext - */ - private static final Map<String, String> ENTER_EXIT_RULE_MAPPING = new HashMap<>(); - - static { - ENTER_EXIT_RULE_MAPPING.put(StatementDefaultContext.class.getSimpleName(), StatementContext.class.getSimpleName()); - ENTER_EXIT_RULE_MAPPING.put(QueryPrimaryDefaultContext.class.getSimpleName(), QueryTermContext.class.getSimpleName()); - ENTER_EXIT_RULE_MAPPING.put(BooleanDefaultContext.class.getSimpleName(), BooleanExpressionContext.class.getSimpleName()); - } - - private boolean insideIn = false; - - // Keep current depth for every rule visited. - // The totalDepth alone cannot be used as expressions like: e1 OR e2 OR e3 OR ...
- are processed as e1 OR (e2 OR (e3 OR (... and this results in the totalDepth not growing - while the stack call depth is, leading to a StackOverflowError. - private ObjectShortHashMap<String> depthCounts = new ObjectShortHashMap<>(); - - @Override - public void enterEveryRule(ParserRuleContext ctx) { - if (inDetected(ctx)) { - insideIn = true; - } - - // Skip PrimaryExpressionContext for IN as it's not visited on exit due to - // the grammar's peculiarity rule with "predicated" and "predicate". - // Also skip the Identifiers as they are "cheap". - if (ctx.getClass() != UnquoteIdentifierContext.class && - ctx.getClass() != QuoteIdentifierContext.class && - ctx.getClass() != BackQuotedIdentifierContext.class && - ctx.getClass() != SqlBaseParser.ConstantContext.class && - ctx.getClass() != SqlBaseParser.NumberContext.class && - ctx.getClass() != SqlBaseParser.ValueExpressionContext.class && - (insideIn == false || ctx.getClass() != PrimaryExpressionContext.class)) { - - int currentDepth = depthCounts.putOrAdd(ctx.getClass().getSimpleName(), (short) 1, (short) 1); - if (currentDepth > MAX_RULE_DEPTH) { - throw new ParsingException(source(ctx), "SQL statement too large; " + - "halt parsing to prevent memory errors (stopped at depth {})", MAX_RULE_DEPTH); - } - } - super.enterEveryRule(ctx); - } - - @Override - public void exitEveryRule(ParserRuleContext ctx) { - if (inDetected(ctx)) { - insideIn = false; - } - - decrementCounter(ctx); - super.exitEveryRule(ctx); - } - - ObjectShortHashMap<String> depthCounts() { - return depthCounts; - } - - private void decrementCounter(ParserRuleContext ctx) { - String className = ctx.getClass().getSimpleName(); - String classNameToDecrement = ENTER_EXIT_RULE_MAPPING.getOrDefault(className, className); - - // Avoid having negative numbers - if (depthCounts.containsKey(classNameToDecrement)) { - depthCounts.putOrAdd(classNameToDecrement, (short) 0, (short) -1); - } - } - - private boolean inDetected(ParserRuleContext ctx) { - if (ctx.getParent() != null && ctx.getParent().getClass() == SqlBaseParser.PredicateContext.class) { - SqlBaseParser.PredicateContext pc = (SqlBaseParser.PredicateContext) ctx.getParent(); - return pc.kind != null && pc.kind.getType() == SqlBaseParser.IN; - } - return false; - } - } - private static final BaseErrorListener ERROR_LISTENER = new BaseErrorListener() { @Override public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line, diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java index d1e05b6ec53..f9b0fc18bca 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/SqlParserTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.parser; import com.google.common.base.Joiner; - import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.expression.NamedExpression; import org.elasticsearch.xpack.sql.expression.Order; @@ -18,19 +17,10 @@ import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredi import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.StringQueryPredicate; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Add; -import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.In;
-import org.elasticsearch.xpack.sql.parser.SqlBaseParser.BooleanExpressionContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryPrimaryDefaultContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.QueryTermContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StatementContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StatementDefaultContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ValueExpressionContext; -import org.elasticsearch.xpack.sql.parser.SqlBaseParser.ValueExpressionDefaultContext; import org.elasticsearch.xpack.sql.plan.logical.Filter; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.logical.OrderBy; import org.elasticsearch.xpack.sql.plan.logical.Project; -import org.elasticsearch.xpack.sql.plan.logical.With; import java.util.ArrayList; import java.util.List; @@ -198,86 +188,44 @@ public class SqlParserTests extends ESTestCase { assertThat(mmqp.optionMap(), hasEntry("fuzzy_rewrite", "scoring_boolean")); } - public void testLimitToPreventStackOverflowFromLongListOfQuotedIdentifiers() { - // Create expression in the form of "t"."field","t"."field", ... - - // 200 elements is ok - new SqlParser().createStatement("SELECT " + - Joiner.on(",").join(nCopies(200, "\"t\".\"field\"")) + " FROM t"); - - // 201 elements parser's "circuit breaker" is triggered - ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createStatement("SELECT " + - Joiner.on(",").join(nCopies(201, "\"t\".\"field\"")) + " FROM t")); - assertEquals("line 1:2409: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); - } - - public void testLimitToPreventStackOverflowFromLongListOfUnQuotedIdentifiers() { - // Create expression in the form of t.field,t.field, ... - - // 250 elements is ok - new SqlParser().createStatement("SELECT " + - Joiner.on(",").join(nCopies(200, "t.field")) + " FROM t"); - - // 251 elements parser's "circuit breaker" is triggered - ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createStatement("SELECT " + - Joiner.on(",").join(nCopies(201, "t.field")) + " FROM t")); - assertEquals("line 1:1609: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); - } - - public void testLimitToPreventStackOverflowFromLargeUnaryBooleanExpression() { - // Create expression in the form of NOT(NOT(NOT ... (b) ...) - - // 99 elements is ok - new SqlParser().createExpression( - Joiner.on("").join(nCopies(99, "NOT(")).concat("b").concat(Joiner.on("").join(nCopies(99, ")")))); - - // 100 elements parser's "circuit breaker" is triggered - ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createExpression( - Joiner.on("").join(nCopies(100, "NOT(")).concat("b").concat(Joiner.on("").join(nCopies(100, ")"))))); - assertEquals("line 1:402: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); - } - public void testLimitToPreventStackOverflowFromLargeBinaryBooleanExpression() { // Create expression in the form of a = b OR a = b OR ... 
a = b - // 100 elements is ok - new SqlParser().createExpression(Joiner.on(" OR ").join(nCopies(100, "a = b"))); + // 1000 elements is ok + new SqlParser().createExpression(Joiner.on(" OR ").join(nCopies(1000, "a = b"))); - // 101 elements parser's "circuit breaker" is triggered + // 5000 elements cause stack overflow ParsingException e = expectThrows(ParsingException.class, () -> - new SqlParser().createExpression(Joiner.on(" OR ").join(nCopies(101, "a = b")))); - assertEquals("line 1:902: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); + new SqlParser().createExpression(Joiner.on(" OR ").join(nCopies(5000, "a = b")))); + assertThat(e.getMessage(), + startsWith("line -1:0: SQL statement is too large, causing stack overflow when generating the parsing tree: [")); } public void testLimitToPreventStackOverflowFromLargeUnaryArithmeticExpression() { // Create expression in the form of abs(abs(abs ... (i) ...) - // 199 elements is ok + // 200 elements is ok new SqlParser().createExpression( - Joiner.on("").join(nCopies(199, "abs(")).concat("i").concat(Joiner.on("").join(nCopies(199, ")")))); + Joiner.on("").join(nCopies(200, "abs(")).concat("i").concat(Joiner.on("").join(nCopies(200, ")")))); - // 200 elements parser's "circuit breaker" is triggered + // 1000 elements cause stack overflow ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createExpression( - Joiner.on("").join(nCopies(200, "abs(")).concat("i").concat(Joiner.on("").join(nCopies(200, ")"))))); - assertEquals("line 1:802: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); + Joiner.on("").join(nCopies(1000, "abs(")).concat("i").concat(Joiner.on("").join(nCopies(1000, ")"))))); + assertThat(e.getMessage(), + startsWith("line -1:0: SQL statement is too large, causing stack overflow when generating the parsing tree: [")); } public void testLimitToPreventStackOverflowFromLargeBinaryArithmeticExpression() { // Create expression in the form of a + a + a + ...
+ a - // 200 elements is ok - new SqlParser().createExpression(Joiner.on(" + ").join(nCopies(200, "a"))); + // 1000 elements is ok + new SqlParser().createExpression(Joiner.on(" + ").join(nCopies(1000, "a"))); - // 201 elements parser's "circuit breaker" is triggered + // 5000 elements cause stack overflow ParsingException e = expectThrows(ParsingException.class, () -> - new SqlParser().createExpression(Joiner.on(" + ").join(nCopies(201, "a")))); - assertEquals("line 1:802: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); + new SqlParser().createExpression(Joiner.on(" + ").join(nCopies(5000, "a")))); + assertThat(e.getMessage(), + startsWith("line -1:0: SQL statement is too large, causing stack overflow when generating the parsing tree: [")); } public void testLimitToPreventStackOverflowFromLargeSubselectTree() { @@ -289,92 +237,13 @@ public class SqlParserTests extends ESTestCase { .concat("t") .concat(Joiner.on("").join(nCopies(199, ")")))); - // 201 elements parser's "circuit breaker" is triggered + // 500 elements cause stack overflow ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createStatement( - Joiner.on(" (").join(nCopies(201, "SELECT * FROM")) + Joiner.on(" (").join(nCopies(500, "SELECT * FROM")) .concat("t") - .concat(Joiner.on("").join(nCopies(200, ")"))))); - assertEquals("line 1:3002: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); - } - - public void testLimitToPreventStackOverflowFromLargeComplexSubselectTree() { - // Test with queries in the form of `SELECT true OR true OR .. FROM (SELECT true OR true OR... FROM (... t) ...) - - new SqlParser().createStatement( - Joiner.on(" (").join(nCopies(20, "SELECT ")). - concat(Joiner.on(" OR ").join(nCopies(180, "true"))).concat(" FROM") - .concat("t").concat(Joiner.on("").join(nCopies(19, ")")))); - - ParsingException e = expectThrows(ParsingException.class, () -> new SqlParser().createStatement( - Joiner.on(" (").join(nCopies(20, "SELECT ")). 
- concat(Joiner.on(" OR ").join(nCopies(190, "true"))).concat(" FROM") - .concat("t").concat(Joiner.on("").join(nCopies(19, ")"))))); - assertEquals("line 1:1628: SQL statement too large; halt parsing to prevent memory errors (stopped at depth 200)", - e.getMessage()); - } - - public void testLimitStackOverflowForInAndLiteralsIsNotApplied() { - int noChildren = 10_000; - LogicalPlan plan = parseStatement("SELECT * FROM t WHERE a IN(" + - Joiner.on(",").join(nCopies(noChildren, "a + 10")) + "," + - Joiner.on(",").join(nCopies(noChildren, "-(-a - 10)")) + "," + - Joiner.on(",").join(nCopies(noChildren, "20")) + "," + - Joiner.on(",").join(nCopies(noChildren, "-20")) + "," + - Joiner.on(",").join(nCopies(noChildren, "20.1234")) + "," + - Joiner.on(",").join(nCopies(noChildren, "-20.4321")) + "," + - Joiner.on(",").join(nCopies(noChildren, "1.1234E56")) + "," + - Joiner.on(",").join(nCopies(noChildren, "-1.4321E-65")) + "," + - Joiner.on(",").join(nCopies(noChildren, "'foo'")) + "," + - Joiner.on(",").join(nCopies(noChildren, "'bar'")) + ")"); - - assertEquals(With.class, plan.getClass()); - assertEquals(Project.class, ((With) plan).child().getClass()); - assertEquals(Filter.class, ((Project) ((With) plan).child()).child().getClass()); - Filter filter = (Filter) ((Project) ((With) plan).child()).child(); - assertEquals(In.class, filter.condition().getClass()); - In in = (In) filter.condition(); - assertEquals("?a", in.value().toString()); - assertEquals(noChildren * 2 + 8, in.list().size()); - assertThat(in.list().get(0).toString(), startsWith("Add[?a,10]#")); - assertThat(in.list().get(noChildren).toString(), startsWith("Neg[Sub[Neg[?a]#")); - assertEquals("20", in.list().get(noChildren * 2).toString()); - assertEquals("-20", in.list().get(noChildren * 2 + 1).toString()); - assertEquals("20.1234", in.list().get(noChildren * 2 + 2).toString()); - assertEquals("-20.4321", in.list().get(noChildren * 2 + 3).toString()); - assertEquals("1.1234E56", in.list().get(noChildren * 2 + 4).toString()); - assertEquals("-1.4321E-65", in.list().get(noChildren * 2 + 5).toString()); - assertEquals("'foo'=foo", in.list().get(noChildren * 2 + 6).toString()); - assertEquals("'bar'=bar", in.list().get(noChildren * 2 + 7).toString()); - } - - public void testDecrementOfDepthCounter() { - SqlParser.CircuitBreakerListener cbl = new SqlParser.CircuitBreakerListener(); - StatementContext sc = new StatementContext(); - QueryTermContext qtc = new QueryTermContext(); - ValueExpressionContext vec = new ValueExpressionContext(); - BooleanExpressionContext bec = new BooleanExpressionContext(); - - cbl.enterEveryRule(sc); - cbl.enterEveryRule(sc); - cbl.enterEveryRule(qtc); - cbl.enterEveryRule(qtc); - cbl.enterEveryRule(qtc); - cbl.enterEveryRule(vec); - cbl.enterEveryRule(bec); - cbl.enterEveryRule(bec); - - cbl.exitEveryRule(new StatementDefaultContext(sc)); - cbl.exitEveryRule(new StatementDefaultContext(sc)); - cbl.exitEveryRule(new QueryPrimaryDefaultContext(qtc)); - cbl.exitEveryRule(new QueryPrimaryDefaultContext(qtc)); - cbl.exitEveryRule(new ValueExpressionDefaultContext(vec)); - cbl.exitEveryRule(new SqlBaseParser.BooleanDefaultContext(bec)); - - assertEquals(0, cbl.depthCounts().get(SqlBaseParser.StatementContext.class.getSimpleName())); - assertEquals(1, cbl.depthCounts().get(SqlBaseParser.QueryTermContext.class.getSimpleName())); - assertEquals(0, cbl.depthCounts().get(SqlBaseParser.ValueExpressionContext.class.getSimpleName())); - assertEquals(1, 
cbl.depthCounts().get(SqlBaseParser.BooleanExpressionContext.class.getSimpleName())); + .concat(Joiner.on("").join(nCopies(499, ")"))))); + assertThat(e.getMessage(), + startsWith("line -1:0: SQL statement is too large, causing stack overflow when generating the parsing tree: [")); } private LogicalPlan parseStatement(String sql) { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index dede9e55999..94388784177 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -102,3 +102,18 @@ setup: "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } +--- +"Test preview with non-existing source index": + - do: + catch: /Source index \[does_not_exist\] does not exist/ + data_frame.preview_data_frame_transform: + body: > + { + "source": { "index": ["airline-data", "does_not_exist"] }, + "pivot": { + "group_by": { + "airline": {"terms": {"field": "airline"}}, + "by-hour": {"date_histogram": {"interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml index b7f951679d1..fa608cefd1e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml @@ -190,4 +190,115 @@ setup: transform_id: "_all" from: 0 size: 10000 +--- +"Test transform where dest is included in source": + - do: + catch: /Destination index \[airline-data-by-airline\] is included in source expression \[airline-data/ + data_frame.put_data_frame_transform: + transform_id: "airline-transform" + body: > + { + "source": { + "index": ["airline-data*"] + }, + "dest": { "index": "airline-data-by-airline" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } +--- +"Test transform where dest is a simple index pattern": + - do: + catch: /Destination index .* should refer to a single index/ + data_frame.put_data_frame_transform: + transform_id: "airline-transform" + body: > + { + "source": { + "index": ["airline-data*"] + }, + "dest": { "index": "destination*" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } +--- +"Test alias scenarios": + - do: + indices.create: + index: created-destination-index + - do: + indices.create: + index: second-created-destination-index + - do: + indices.put_alias: + index: airline-data + name: source-index + - do: + indices.put_alias: + index: created-destination-index + name: dest-index + - do: + data_frame.put_data_frame_transform: + transform_id: "transform-from-aliases" + body: > + { + "source": { + "index": "source-index" + }, + "dest": { "index": "dest-index" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + - match: { acknowledged: true } + - do: + indices.put_alias: + index: created-destination-index + name: source-index + + - do: + catch: /Destination index 
\[created-destination-index\] is included in source expression \[airline-data,created-destination-index\]/ + data_frame.put_data_frame_transform: + transform_id: "transform-from-aliases-failures" + body: > + { + "source": { + "index": "source-index" + }, + "dest": { "index": "dest-index" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + + - do: + indices.delete_alias: + index: created-destination-index + name: source-index + + - do: + indices.put_alias: + index: second-created-destination-index + name: dest-index + + - do: + catch: /Destination index \[dest-index\] should refer to a single index/ + data_frame.put_data_frame_transform: + transform_id: "airline-transform" + body: > + { + "source": { + "index": ["source-index"] + }, + "dest": { "index": "dest-index" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index 0f3488d146a..e93a0deb037 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -1217,4 +1217,49 @@ setup: - match: { aggregations.date_histogram#histo.buckets.3.doc_count: 20 } - match: { aggregations.date_histogram#histo.buckets.3.max#the_max.value: 4 } +--- +"Search error against live index": + + - do: + catch: bad_request + rollup.rollup_search: + index: "foo" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "asdfasdf" + + +--- +"Search error against rollup and live index": + + - do: + catch: bad_request + rollup.rollup_search: + index: "foo*" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "asdfasdf" + +--- +"Search error no matching indices": + + - do: + catch: /Must specify at least one concrete index/ + rollup.rollup_search: + index: "bar*" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + interval: "1h" diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index 5719f03fc6a..09660e336e8 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -111,10 +111,6 @@ if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_8) { ) } -run { - plugin xpackModule('core') -} - test { /* * We have to disable setting the number of available processors as tests in the same JVM randomize processors and will step on each diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java index a457c1052ca..f6c46f6c68f 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java @@ -36,6 +36,7 @@ public class RejectedExecutionTests extends AbstractWatcherIntegrationTestCase { return false; } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41734") public void testHistoryAndTriggeredOnRejection() throws Exception { WatcherClient watcherClient = watcherClient(); 
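        // Note: @AwaitsFix (from the Lucene test framework used by these tests)
        // marks a test with a known bug; the runner skips it until the issue
        // linked in bugUrl is resolved. The test body below is unchanged context.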
createIndex("idx"); diff --git a/x-pack/qa/oidc-op-tests/build.gradle b/x-pack/qa/oidc-op-tests/build.gradle index 72fd21c9932..52e581f60a5 100644 --- a/x-pack/qa/oidc-op-tests/build.gradle +++ b/x-pack/qa/oidc-op-tests/build.gradle @@ -32,7 +32,6 @@ integTestCluster { setting 'xpack.security.authc.realms.native.native.order', '1' // OpenID Connect Realm 1 configured for authorization grant flow setting 'xpack.security.authc.realms.oidc.c2id.order', '2' - setting 'xpack.security.authc.realms.oidc.c2id.op.name', 'c2id-op' setting 'xpack.security.authc.realms.oidc.c2id.op.issuer', 'http://localhost:8080' setting 'xpack.security.authc.realms.oidc.c2id.op.authorization_endpoint', "http://127.0.0.1:${-> ephemeralPort}/c2id-login" setting 'xpack.security.authc.realms.oidc.c2id.op.token_endpoint', "http://127.0.0.1:${-> ephemeralPort}/c2id/token" @@ -48,7 +47,6 @@ integTestCluster { setting 'xpack.security.authc.realms.oidc.c2id.claims.groups', 'groups' // OpenID Connect Realm 2 configured for implicit flow setting 'xpack.security.authc.realms.oidc.c2id-implicit.order', '3' - setting 'xpack.security.authc.realms.oidc.c2id-implicit.op.name', 'c2id-implicit' setting 'xpack.security.authc.realms.oidc.c2id-implicit.op.issuer', 'http://localhost:8080' setting 'xpack.security.authc.realms.oidc.c2id-implicit.op.authorization_endpoint', "http://127.0.0.1:${-> ephemeralPort}/c2id-login" setting 'xpack.security.authc.realms.oidc.c2id-implicit.op.token_endpoint', "http://127.0.0.1:${-> ephemeralPort}/c2id/token" diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index 4d8da870e33..8e87a1258ba 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -145,9 +145,11 @@ project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackRestSpec) repositories { maven { + name "elastic" url "https://artifacts.elastic.co/maven" } maven { + name "elastic-snapshots" url "https://snapshots.elastic.co/maven" } } diff --git a/x-pack/qa/security-setup-password-tests/build.gradle b/x-pack/qa/security-setup-password-tests/build.gradle index c0801a38b57..a99fa2d5438 100644 --- a/x-pack/qa/security-setup-password-tests/build.gradle +++ b/x-pack/qa/security-setup-password-tests/build.gradle @@ -10,6 +10,7 @@ dependencies { integTestRunner { systemProperty 'tests.security.manager', 'false' + // TODO add tests.config.dir = {cluster.singleNode().getConfigDir()} when converting to testclusters } integTestCluster {