diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 793a6540f38..51819b56a14 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -164,6 +164,10 @@ if (project != rootProject) {
   apply plugin: 'nebula.maven-base-publish'
   apply plugin: 'nebula.maven-scm'
 
+  // we need to apply these again to override the build plugin
+  targetCompatibility = "10"
+  sourceCompatibility = "10"
+
   // groovydoc succeeds, but has some weird internal exception...
   groovydoc.enabled = false
 
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
index 7032b05ed90..bf06ac34766 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
@@ -217,7 +217,7 @@ class PrecommitTasks {
     private static Task configureNamingConventions(Project project) {
         if (project.sourceSets.findByName("test")) {
             Task namingConventionsTask = project.tasks.create('namingConventions', NamingConventionsTask)
-            namingConventionsTask.javaHome = project.runtimeJavaHome
+            namingConventionsTask.javaHome = project.compilerJavaHome
             return namingConventionsTask
         }
         return null
diff --git a/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java
index 5027a440337..0174f576e2b 100644
--- a/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java
+++ b/buildSrc/src/main/java/org/elasticsearch/GradleServicesAdapter.java
@@ -41,7 +41,7 @@ import java.io.File;
  */
 public class GradleServicesAdapter {
 
-    public final Project project;
+    private final Project project;
 
     public GradleServicesAdapter(Project project) {
         this.project = project;
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java
index 365a12c076c..721eddb5291 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Distribution.java
@@ -20,17 +20,23 @@ package org.elasticsearch.gradle;
 
 public enum Distribution {
 
-    INTEG_TEST("integ-test"),
-    ZIP("elasticsearch"),
-    ZIP_OSS("elasticsearch-oss");
+    INTEG_TEST("integ-test", "zip"),
+    ZIP("elasticsearch", "zip"),
+    ZIP_OSS("elasticsearch-oss", "zip");
 
     private final String fileName;
+    private final String fileExtension;
 
-    Distribution(String name) {
+    Distribution(String name, String fileExtension) {
         this.fileName = name;
+        this.fileExtension = fileExtension;
     }
 
     public String getFileName() {
         return fileName;
     }
+
+    public String getFileExtension() {
+        return fileExtension;
+    }
 }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
index 4c7e84c423e..fa4415bbe1e 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java
@@ -20,25 +20,67 @@ package org.elasticsearch.gradle.testclusters;
 
 import org.elasticsearch.GradleServicesAdapter;
 import org.elasticsearch.gradle.Distribution;
+import org.elasticsearch.gradle.Version;
 import org.gradle.api.logging.Logger;
 import org.gradle.api.logging.Logging;
+import org.gradle.internal.os.OperatingSystem;
 
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
 import java.util.Objects;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import static java.util.Objects.requireNonNull;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
 
 public class ElasticsearchNode {
 
+    private final Logger logger = Logging.getLogger(ElasticsearchNode.class);
     private final String name;
     private final GradleServicesAdapter services;
     private final AtomicBoolean configurationFrozen = new AtomicBoolean(false);
-    private final Logger logger = Logging.getLogger(ElasticsearchNode.class);
+    private final File artifactsExtractDir;
+    private final File workingDir;
+
+    private static final int ES_DESTROY_TIMEOUT = 20;
+    private static final TimeUnit ES_DESTROY_TIMEOUT_UNIT = TimeUnit.SECONDS;
+    private static final int NODE_UP_TIMEOUT = 30;
+    private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.SECONDS;
+    private final LinkedHashMap<String, Predicate<ElasticsearchNode>> waitConditions;
 
     private Distribution distribution;
     private String version;
+    private File javaHome;
+    private volatile Process esProcess;
+    private final String path;
 
-    public ElasticsearchNode(String name, GradleServicesAdapter services) {
+    ElasticsearchNode(String path, String name, GradleServicesAdapter services, File artifactsExtractDir, File workingDirBase) {
+        this.path = path;
         this.name = name;
         this.services = services;
+        this.artifactsExtractDir = artifactsExtractDir;
+        this.workingDir = new File(workingDirBase, safeName(name));
+        this.waitConditions = new LinkedHashMap<>();
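+        // NOTE: conditions are evaluated in insertion order (hence the LinkedHashMap),
+        // so the ports files written by the node are checked before the HTTP health check runs.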
+        waitConditions.put("http ports file", node -> node.getHttpPortsFile().exists());
+        waitConditions.put("transport ports file", node -> node.getTransportPortFile().exists());
+        waitForUri("cluster health yellow", "/_cluster/health?wait_for_nodes=>=1&wait_for_status=yellow");
     }
 
     public String getName() {
@@ -50,6 +92,7 @@ public class ElasticsearchNode {
     }
 
     public void setVersion(String version) {
+        requireNonNull(version, "null version passed when configuring test cluster `" + this + "`");
         checkFrozen();
         this.version = version;
     }
@@ -59,22 +102,258 @@ public class ElasticsearchNode {
     }
 
     public void setDistribution(Distribution distribution) {
+        requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`");
         checkFrozen();
         this.distribution = distribution;
     }
 
-    void start() {
-        logger.info("Starting `{}`", this);
-    }
-
-    void stop(boolean tailLogs) {
-        logger.info("Stopping `{}`, tailLogs: {}", this, tailLogs);
-    }
-
     public void freeze() {
+        requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`");
+        requireNonNull(version, "null version passed when configuring test cluster `" + this + "`");
         logger.info("Locking configuration of `{}`", this);
         configurationFrozen.set(true);
-        Objects.requireNonNull(version, "Version of test cluster `" + this + "` can't be null");
+    }
+
+    public void setJavaHome(File javaHome) {
+        requireNonNull(javaHome, "null javaHome passed when configuring test cluster `" + this + "`");
+        checkFrozen();
+        if (javaHome.exists() == false) {
+            throw new TestClustersException("java home for `" + this + "` does not exist: `" + javaHome + "`");
+        }
+        this.javaHome = javaHome;
+    }
+
+    public File getJavaHome() {
+        return javaHome;
+    }
+
+    private void waitForUri(String description, String uri) {
+        waitConditions.put(description, (node) -> {
+            try {
+                URL url = new URL("http://" + this.getHttpPortInternal().get(0) + uri);
+                HttpURLConnection con = (HttpURLConnection) url.openConnection();
+                con.setRequestMethod("GET");
+                con.setConnectTimeout(500);
+                con.setReadTimeout(500);
+                try (BufferedReader reader = new BufferedReader(new InputStreamReader(con.getInputStream()))) {
+                    String response = reader.lines().collect(Collectors.joining("\n"));
+                    logger.info("{} -> {} ->\n{}", this, uri, response);
+                }
+                return true;
+            } catch (IOException e) {
+                throw new IllegalStateException("Connection attempt to " + this + " failed", e);
+            }
+        });
+    }
+
+    synchronized void start() {
+        logger.info("Starting `{}`", this);
+
+        File distroArtifact = new File(
+            new File(artifactsExtractDir, distribution.getFileExtension()),
+            distribution.getFileName() + "-" + getVersion()
+        );
+        if (distroArtifact.exists() == false) {
+            throw new TestClustersException("Can not start " + this + ", missing: " + distroArtifact);
+        }
+        if (distroArtifact.isDirectory() == false) {
+            throw new TestClustersException("Can not start " + this + ", is not a directory: " + distroArtifact);
+        }
+        services.sync(spec -> {
+            spec.from(new File(distroArtifact, "config"));
+            spec.into(getConfigFile().getParent());
+        });
+        configure();
+        startElasticsearchProcess(distroArtifact);
+    }
+
+    private void startElasticsearchProcess(File distroArtifact) {
+        logger.info("Running `bin/elasticsearch` in `{}` for {}", workingDir, this);
+        final ProcessBuilder processBuilder = new ProcessBuilder();
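+        // On Windows the distribution ships a bat file that has to run through cmd;
+        // on other platforms the shell script can be executed directly.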
+        if (OperatingSystem.current().isWindows()) {
+            processBuilder.command(
+                "cmd", "/c",
+                new File(distroArtifact, "\\bin\\elasticsearch.bat").getAbsolutePath()
+            );
+        } else {
+            processBuilder.command(
+                new File(distroArtifact.getAbsolutePath(), "bin/elasticsearch").getAbsolutePath()
+            );
+        }
+        try {
+            processBuilder.directory(workingDir);
+            Map<String, String> environment = processBuilder.environment();
+            // Don't inherit anything from the environment as that would hurt reproducibility
+            environment.clear();
+            if (javaHome != null) {
+                environment.put("JAVA_HOME", getJavaHome().getAbsolutePath());
+            } else if (System.getenv().get("JAVA_HOME") != null) {
+                logger.warn("{}: No java home configured, will use it from the environment: {}",
+                    this, System.getenv().get("JAVA_HOME")
+                );
+                environment.put("JAVA_HOME", System.getenv().get("JAVA_HOME"));
+            } else {
+                logger.warn("{}: No javaHome configured, will rely on default java detection", this);
+            }
+            environment.put("ES_PATH_CONF", getConfigFile().getParentFile().getAbsolutePath());
+            environment.put("ES_JAVA_OPTIONS", "-Xms512m -Xmx512m");
+            // don't buffer all in memory, make sure we don't block on the default pipes
+            processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(getStdErrFile()));
+            processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(getStdoutFile()));
+            esProcess = processBuilder.start();
+        } catch (IOException e) {
+            throw new TestClustersException("Failed to start ES process for " + this, e);
+        }
+    }
+
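+    /**
+     * Returns the HTTP socket of this node, blocking until all wait conditions pass.
+     */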
+    public String getHttpSocketURI() {
+        waitForAllConditions();
+        return getHttpPortInternal().get(0);
+    }
+
+    public String getTransportPortURI() {
+        waitForAllConditions();
+        return getTransportPortInternal().get(0);
+    }
+
+    synchronized void stop(boolean tailLogs) {
+        if (esProcess == null && tailLogs) {
+            // This is a special case. If start() throws an exception the plugin will still call stop.
+            // Another exception here would eat the original.
+            return;
+        }
+        logger.info("Stopping `{}`, tailLogs: {}", this, tailLogs);
+        requireNonNull(esProcess, "Can't stop `" + this + "` as it was not started or already stopped.");
+        stopHandle(esProcess.toHandle());
+        if (tailLogs) {
+            logFileContents("Standard output of node", getStdoutFile());
+            logFileContents("Standard error of node", getStdErrFile());
+        }
+        esProcess = null;
+    }
+
+    private void stopHandle(ProcessHandle processHandle) {
+        // Stop all children first, ES could actually be a child when there's some wrapper process like on Windows.
+        if (processHandle.isAlive()) {
+            processHandle.children().forEach(this::stopHandle);
+        }
+        logProcessInfo("Terminating elasticsearch process:", processHandle.info());
+        if (processHandle.isAlive()) {
+            processHandle.destroy();
+        } else {
+            logger.info("Process was not running when we tried to terminate it.");
+        }
+        waitForProcessToExit(processHandle);
+        if (processHandle.isAlive()) {
+            logger.info("process did not terminate after {} {}, stopping it forcefully",
+                ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT
+            );
+            processHandle.destroyForcibly();
+        }
+        waitForProcessToExit(processHandle);
+        if (processHandle.isAlive()) {
+            throw new TestClustersException("Was not able to terminate es process");
+        }
+    }
+
+    private void logProcessInfo(String prefix, ProcessHandle.Info info) {
+        logger.info(prefix + " commandLine:`{}` command:`{}` args:`{}`",
+            info.commandLine().orElse("-"), info.command().orElse("-"),
+            Arrays.stream(info.arguments().orElse(new String[]{}))
+                .map(each -> "'" + each + "'")
+                .collect(Collectors.joining(" "))
+        );
+    }
+
+    private void logFileContents(String description, File from) {
+        logger.error("{} `{}`", description, this);
+        try (BufferedReader reader = new BufferedReader(new FileReader(from))) {
+            reader.lines()
+                .map(line -> " [" + name + "]" + line)
+                .forEach(logger::error);
+        } catch (IOException e) {
+            throw new TestClustersException("Error reading " + description, e);
+        }
+    }
+
+    private void waitForProcessToExit(ProcessHandle processHandle) {
+        try {
+            processHandle.onExit().get(ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT);
+        } catch (InterruptedException e) {
+            logger.info("Interrupted while waiting for ES process", e);
+            Thread.currentThread().interrupt();
+        } catch (ExecutionException e) {
+            logger.info("Failure while waiting for process to exit", e);
+        } catch (TimeoutException e) {
+            logger.info("Timed out waiting for process to exit", e);
+        }
+    }
+
+    private File getConfigFile() {
+        return new File(workingDir, "config/elasticsearch.yml");
+    }
+
+    private File getConfPathData() {
+        return new File(workingDir, "data");
+    }
+
+    private File getConfPathSharedData() {
+        return new File(workingDir, "sharedData");
+    }
+
+    private File getConfPathRepo() {
+        return new File(workingDir, "repo");
+    }
+
+    private File getConfPathLogs() {
+        return new File(workingDir, "logs");
+    }
+
+    private File getStdoutFile() {
+        return new File(getConfPathLogs(), "es.stdout.log");
+    }
+
+    private File getStdErrFile() {
+        return new File(getConfPathLogs(), "es.stderr.log");
+    }
+
+    private void configure() {
+        getConfigFile().getParentFile().mkdirs();
+        getConfPathRepo().mkdirs();
+        getConfPathData().mkdirs();
+        getConfPathSharedData().mkdirs();
+        getConfPathLogs().mkdirs();
+        LinkedHashMap<String, String> config = new LinkedHashMap<>();
+        config.put("cluster.name", "cluster-" + safeName(name));
+        config.put("node.name", "node-" + safeName(name));
+        config.put("path.repo", getConfPathRepo().getAbsolutePath());
+        config.put("path.data", getConfPathData().getAbsolutePath());
+        config.put("path.logs", getConfPathLogs().getAbsolutePath());
+        config.put("path.shared_data", getConfPathSharedData().getAbsolutePath());
+        config.put("node.attr.testattr", "test");
+        config.put("node.portsfile", "true");
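+        // Port 0 makes the OS assign free ports; the node then reports the actual
+        // ports through the ports files that the wait conditions poll for.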
+        config.put("http.port", "0");
+        config.put("transport.tcp.port", "0");
+        // Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
+        config.put("cluster.routing.allocation.disk.watermark.low", "1b");
+        config.put("cluster.routing.allocation.disk.watermark.high", "1b");
+        // increase script compilation limit since tests can rapid-fire script compilations
+        config.put("script.max_compilations_rate", "2048/1m");
+        if (Version.fromString(version).getMajor() >= 6) {
+            config.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b");
+        }
+        try {
+            Files.write(
+                getConfigFile().toPath(),
+                config.entrySet().stream()
+                    .map(entry -> entry.getKey() + ": " + entry.getValue())
+                    .collect(Collectors.joining("\n"))
+                    .getBytes(StandardCharsets.UTF_8)
+            );
+        } catch (IOException e) {
+            throw new TestClustersException("Could not write config file: " + getConfigFile(), e);
+        }
+        logger.info("Written config file: {} for {}", getConfigFile(), this);
     }
 
     private void checkFrozen() {
@@ -83,21 +362,121 @@ public class ElasticsearchNode {
         }
     }
 
+    private static String safeName(String name) {
+        return name
+            .replaceAll("^[^a-zA-Z0-9]+", "")
+            .replaceAll("[^a-zA-Z0-9]+", "-");
+    }
+
+    private File getHttpPortsFile() {
+        return new File(getConfPathLogs(), "http.ports");
+    }
+
+    private File getTransportPortFile() {
+        return new File(getConfPathLogs(), "transport.ports");
+    }
+
+    private List<String> getTransportPortInternal() {
+        File transportPortFile = getTransportPortFile();
+        try {
+            return readPortsFile(getTransportPortFile());
+        } catch (IOException e) {
+            throw new TestClustersException(
+                "Failed to read transport ports file: " + transportPortFile + " for " + this, e
+            );
+        }
+    }
+
+    private List<String> getHttpPortInternal() {
+        File httpPortsFile = getHttpPortsFile();
+        try {
+            return readPortsFile(getHttpPortsFile());
+        } catch (IOException e) {
+            throw new TestClustersException(
+                "Failed to read http ports file: " + httpPortsFile + " for " + this, e
+            );
+        }
+    }
+
+    private List<String> readPortsFile(File file) throws IOException {
+        try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
+            return reader.lines()
+                .map(String::trim)
+                .collect(Collectors.toList());
+        }
+    }
+
+    private void waitForAllConditions() {
+        requireNonNull(esProcess, "Can't wait for `" + this + "` as it was stopped.");
+        long startedAt = System.currentTimeMillis();
+        logger.info("Starting to wait for cluster to come up");
+        waitConditions.forEach((description, predicate) -> {
+            long thisConditionStartedAt = System.currentTimeMillis();
+            boolean conditionMet = false;
+            Throwable lastException = null;
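+            // Note: all conditions share the same overall NODE_UP_TIMEOUT budget;
+            // the clock starts when waiting begins, not per condition.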
+            while (
+                System.currentTimeMillis() - startedAt < MILLISECONDS.convert(NODE_UP_TIMEOUT, NODE_UP_TIMEOUT_UNIT)
+            ) {
+                if (esProcess.isAlive() == false) {
+                    throw new TestClustersException(
+                        "process was found dead while waiting for " + description + ", " + this
+                    );
+                }
+                try {
+                    if (predicate.test(this)) {
+                        conditionMet = true;
+                        break;
+                    }
+                } catch (TestClustersException e) {
+                    throw new TestClustersException(e);
+                } catch (Exception e) {
+                    if (lastException == null) {
+                        lastException = e;
+                    } else {
+                        e.addSuppressed(lastException);
+                        lastException = e;
+                    }
+                }
+                try {
+                    Thread.sleep(500);
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                }
+            }
+            if (conditionMet == false) {
+                String message = "`" + this + "` failed to wait for " + description + " after " +
+                    NODE_UP_TIMEOUT + " " + NODE_UP_TIMEOUT_UNIT;
+                if (lastException == null) {
+                    throw new TestClustersException(message);
+                } else {
+                    throw new TestClustersException(message, lastException);
+                }
+            }
+            logger.info(
+                "{}: {} took {} seconds",
+                this, description,
+                SECONDS.convert(System.currentTimeMillis() - thisConditionStartedAt, MILLISECONDS)
+            );
+        });
+    }
+
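+    // Identity includes the project path, so same-named clusters defined in
+    // different projects of a multi project build remain distinct.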
     @Override
     public boolean equals(Object o) {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
         ElasticsearchNode that = (ElasticsearchNode) o;
-        return Objects.equals(name, that.name);
+        return Objects.equals(name, that.name) &&
+            Objects.equals(path, that.path);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(name);
+        return Objects.hash(name, path);
     }
 
     @Override
     public String toString() {
-        return "ElasticsearchNode{name='" + name + "'}";
+        return "node{" + path + ":" + name + "}";
     }
 }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersException.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersException.java
new file mode 100644
index 00000000000..9056fdec282
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gradle.testclusters;
+
+class TestClustersException extends RuntimeException {
+    TestClustersException(String message) {
+        super(message);
+    }
+
+    TestClustersException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    TestClustersException(Throwable cause) {
+        super(cause);
+    }
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
index 2ea5e62306a..1fe8bec1902 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java
@@ -40,6 +40,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
 public class TestClustersPlugin implements Plugin<Project> {
@@ -48,14 +51,17 @@ public class TestClustersPlugin implements Plugin<Project> {
     private static final String NODE_EXTENSION_NAME = "testClusters";
     static final String HELPER_CONFIGURATION_NAME = "testclusters";
     private static final String SYNC_ARTIFACTS_TASK_NAME = "syncTestClustersArtifacts";
+    private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1;
+    private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES;
 
-    private final Logger logger = Logging.getLogger(TestClustersPlugin.class);
+    private static final Logger logger = Logging.getLogger(TestClustersPlugin.class);
 
     // this is static because we need a single mapping across multi project builds, as some of the listeners we use,
     // like task graph are singletons across multi project builds.
     private static final Map<Task, List<ElasticsearchNode>> usedClusters = new ConcurrentHashMap<>();
     private static final Map<ElasticsearchNode, Integer> claimsInventory = new ConcurrentHashMap<>();
     private static final Set<ElasticsearchNode> runningClusters = Collections.synchronizedSet(new HashSet<>());
+    private static volatile ExecutorService executorService;
 
     @Override
     public void apply(Project project) {
@@ -106,6 +112,9 @@ public class TestClustersPlugin implements Plugin<Project> {
         // After each task we determine if there are clusters that are no longer needed.
         configureStopClustersHook(project);
 
+        // configure hooks to make sure no test cluster processes survive the build
+        configureCleanupHooks(project);
+
         // Since we have everything modeled in the DSL, add all the required dependencies e.x. the distribution to the
         // configuration so the user doesn't have to repeat this.
         autoConfigureClusterDependencies(project, rootProject, container);
@@ -117,8 +126,11 @@ public class TestClustersPlugin implements Plugin<Project> {
         NamedDomainObjectContainer<ElasticsearchNode> container = project.container(
             ElasticsearchNode.class,
             name -> new ElasticsearchNode(
+                project.getPath(),
                 name,
-                GradleServicesAdapter.getInstance(project)
+                GradleServicesAdapter.getInstance(project),
+                SyncTestClustersConfiguration.getTestClustersConfigurationExtractDir(project),
+                new File(project.getBuildDir(), "testclusters")
             )
         );
         project.getExtensions().add(NODE_EXTENSION_NAME, container);
@@ -137,14 +149,14 @@ public class TestClustersPlugin implements Plugin<Project> {
         );
     }
 
-    private void createUseClusterTaskExtension(Project project) {
+    private static void createUseClusterTaskExtension(Project project) {
         // register an extension for all current and future tasks, so that any task can declare that it wants to use a
         // specific cluster.
         project.getTasks().all((Task task) ->
             task.getExtensions().findByType(ExtraPropertiesExtension.class)
                 .set(
                     "useCluster",
-                    new Closure<Void>(this, task) {
+                    new Closure<Void>(project, task) {
                         public void doCall(ElasticsearchNode node) {
                             Object thisObject = this.getThisObject();
                             if (thisObject instanceof Task == false) {
@@ -160,7 +172,7 @@ public class TestClustersPlugin implements Plugin<Project> {
         );
     }
 
-    private void configureClaimClustersHook(Project project) {
+    private static void configureClaimClustersHook(Project project) {
         project.getGradle().getTaskGraph().whenReady(taskExecutionGraph ->
             taskExecutionGraph.getAllTasks()
                 .forEach(task ->
@@ -174,7 +186,7 @@ public class TestClustersPlugin implements Plugin<Project> {
         );
     }
 
-    private void configureStartClustersHook(Project project) {
+    private static void configureStartClustersHook(Project project) {
         project.getGradle().addListener(
             new TaskActionListener() {
                 @Override
@@ -196,7 +208,7 @@ public class TestClustersPlugin implements Plugin<Project> {
         );
     }
 
-    private void configureStopClustersHook(Project project) {
+    private static void configureStopClustersHook(Project project) {
         project.getGradle().addListener(
             new TaskExecutionListener() {
                 @Override
@@ -226,6 +238,7 @@ public class TestClustersPlugin implements Plugin<Project> {
                     .filter(entry -> runningClusters.contains(entry.getKey()))
                     .map(Map.Entry::getKey)
                     .collect(Collectors.toList());
+                runningClusters.removeAll(stoppable);
             }
             stoppable.forEach(each -> each.stop(false));
         }
@@ -251,7 +264,7 @@ public class TestClustersPlugin implements Plugin<Project> {
             project.getExtensions().getByName(NODE_EXTENSION_NAME);
     }
 
-    private void autoConfigureClusterDependencies(
+    private static void autoConfigureClusterDependencies(
         Project project,
         Project rootProject,
         NamedDomainObjectContainer<ElasticsearchNode> container
@@ -272,6 +285,59 @@ public class TestClustersPlugin implements Plugin<Project> {
         }));
     }
 
+    private static void configureCleanupHooks(Project project) {
+        synchronized (runningClusters) {
+            if (executorService == null || executorService.isTerminated()) {
+                executorService = Executors.newSingleThreadExecutor();
+            } else {
+                throw new IllegalStateException("Trying to configure executor service twice");
+            }
+        }
+        // When the Gradle daemon is used, it will interrupt all threads when the build concludes.
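+        // The sleeping task below exists solely to receive that interrupt and use it
+        // as the signal to shut all clusters down.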
+        executorService.submit(() -> {
+            while (true) {
+                try {
+                    Thread.sleep(Long.MAX_VALUE);
+                } catch (InterruptedException interrupted) {
+                    shutDownAllClusters();
+                    Thread.currentThread().interrupt();
+                    return;
+                }
+            }
+        });
+
+        project.getGradle().buildFinished(buildResult -> {
+            logger.info("Build finished");
+            shutdownExecutorService();
+        });
+        // When the Daemon is not used, or runs into issues, rely on a shutdown hook.
+        // When the daemon is used, but does not work correctly and eventually dies off (e.g. due to a non
+        // interruptible thread in the build) the process will eventually be stopped when the daemon dies.
+        Runtime.getRuntime().addShutdownHook(new Thread(TestClustersPlugin::shutDownAllClusters));
+    }
+
+    private static void shutdownExecutorService() {
+        executorService.shutdownNow();
+        try {
+            if (executorService.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIMEOUT_UNIT) == false) {
+                throw new IllegalStateException(
+                    "Failed to shut down executor service after " +
+                        EXECUTOR_SHUTDOWN_TIMEOUT + " " + EXECUTOR_SHUTDOWN_TIMEOUT_UNIT
+                );
+            }
+        } catch (InterruptedException e) {
+            logger.info("Wait for testclusters shutdown interrupted", e);
+            Thread.currentThread().interrupt();
+        }
+    }
+
+    private static void shutDownAllClusters() {
+        logger.info("Shutting down all test clusters", new RuntimeException());
+        synchronized (runningClusters) {
+            runningClusters.forEach(each -> each.stop(true));
+            runningClusters.clear();
+        }
+    }
 }
diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java
index 025c549489a..fc89a019f8d 100644
--- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java
@@ -154,10 +154,11 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase {
         for (String each : text) {
             int i = output.indexOf(each);
             if (i == -1 ) {
-                fail("Expected `" + text + "` to appear at most once, but it didn't at all.\n\nOutout is:\n"+ output);
+                fail("Expected \n```" + each + "```\nto appear at most once, but it didn't appear at all.\n\nOutput is:\n" + output
+                );
             }
             if(output.indexOf(each) != output.lastIndexOf(each)) {
-                fail("Expected `" + text + "` to appear at most once, but it did multiple times.\n\nOutout is:\n"+ output);
+                fail("Expected `" + each + "` to appear at most once, but it did multiple times.\n\nOutput is:\n" + output);
             }
         }
     }
diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
index f153919ac06..ee366ac7b7c 100644
--- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
@@ -76,8 +76,8 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
         assertOutputContains(
             result.getOutput(),
             "> Task :user1",
-            "Starting `ElasticsearchNode{name='myTestCluster'}`",
-            "Stopping `ElasticsearchNode{name='myTestCluster'}`"
+            "Starting `node{::myTestCluster}`",
+            "Stopping `node{::myTestCluster}`"
         );
     }
 
@@ -88,7 +88,6 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
             .withPluginClasspath()
             .build();
         assertTaskSuccessful(result, ":user1", ":user2");
-
         assertStartedAndStoppedOnce(result);
     }
 
@@ -98,7 +97,7 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
         assertStartedAndStoppedOnce(result);
         assertOutputContains(
             result.getOutput(),
-            "Stopping `ElasticsearchNode{name='myTestCluster'}`, tailLogs: true",
+            "Stopping `node{::myTestCluster}`, tailLogs: true",
             "Execution failed for task ':itAlwaysFails'."
         );
     }
@@ -110,7 +109,7 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
         assertStartedAndStoppedOnce(result);
         assertOutputContains(
             result.getOutput(),
-            "Stopping `ElasticsearchNode{name='myTestCluster'}`, tailLogs: true",
+            "Stopping `node{::myTestCluster}`, tailLogs: true",
             "Execution failed for task ':itAlwaysFails'."
         );
     }
@@ -146,8 +145,8 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
     private void assertStartedAndStoppedOnce(BuildResult result) {
         assertOutputOnlyOnce(
             result.getOutput(),
-            "Starting `ElasticsearchNode{name='myTestCluster'}`",
-            "Stopping `ElasticsearchNode{name='myTestCluster'}`"
+            "Starting `node{::myTestCluster}`",
+            "Stopping `node{::myTestCluster}`"
         );
     }
 }
diff --git a/buildSrc/src/testKit/testclusters/build.gradle b/buildSrc/src/testKit/testclusters/build.gradle
index 15e34bbccd4..67c9afdbc82 100644
--- a/buildSrc/src/testKit/testclusters/build.gradle
+++ b/buildSrc/src/testKit/testclusters/build.gradle
@@ -18,14 +18,14 @@ repositories {
 task user1 {
     useCluster testClusters.myTestCluster
     doLast {
-        println "user1 executing"
+        println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
    }
 }
 
 task user2 {
     useCluster testClusters.myTestCluster
     doLast {
-        println "user2 executing"
+        println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
    }
 }
 
diff --git a/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle
index dda6be2f6a5..783e6d9a80e 100644
--- a/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle
+++ b/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle
@@ -10,12 +10,12 @@ testClusters {
 task user1 {
     useCluster testClusters.myTestCluster
     doFirst {
-        println "$path"
+        println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
     }
 }
 task user2 {
     useCluster testClusters.myTestCluster
     doFirst {
-        println "$path"
+        println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
     }
 }
diff --git a/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle
index b62302d9d54..d13cab6eaa9 100644
--- a/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle
+++ b/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle
@@ -12,13 +12,13 @@ testClusters {
 task user1 {
     useCluster testClusters.myTestCluster
     doFirst {
-        println "$path"
+        println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
     }
 }
 task user2 {
     useCluster testClusters.myTestCluster
     doFirst {
-        println "$path"
+        println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
     }
 }
diff --git a/buildSrc/src/testKit/testclusters_multiproject/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/build.gradle
index 06234f4b368..18f7b277d01 100644
--- a/buildSrc/src/testKit/testclusters_multiproject/build.gradle
+++ b/buildSrc/src/testKit/testclusters_multiproject/build.gradle
@@ -20,13 +20,13 @@ testClusters {
 task user1 {
     useCluster testClusters.myTestCluster
     doFirst {
-        println "$path"
+        println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
     }
 }
 task user2 {
     useCluster testClusters.myTestCluster
     doFirst {
-        println "$path"
+        println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
     }
 }
\ No newline at end of file
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java
index 86710ffdf8d..25eb260eec4 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java
@@ -21,6 +21,8 @@ package org.elasticsearch.client;
 
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest;
+import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
+import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
 import org.elasticsearch.client.ccr.PauseFollowRequest;
 import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
 import org.elasticsearch.client.ccr.PutFollowRequest;
@@ -291,7 +293,7 @@ public final class CcrClient {
     }
 
     /**
-     * Deletes an auto follow pattern.
+     * Asynchronously deletes an auto follow pattern.
      *
      * See
      * the docs for more.
@@ -313,4 +315,49 @@ public final class CcrClient {
         );
     }
 
+    /**
+     * Gets an auto follow pattern.
+     *
+     * See
+     * the docs for more.
+     *
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public GetAutoFollowPatternResponse getAutoFollowPattern(GetAutoFollowPatternRequest request,
+                                                             RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(
+            request,
+            CcrRequestConverters::getAutoFollowPattern,
+            options,
+            GetAutoFollowPatternResponse::fromXContent,
+            Collections.emptySet()
+        );
+    }
+
+    /**
+     * Asynchronously gets an auto follow pattern.
+     *
+     * See
+     * the docs for more.
+     *
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public void getAutoFollowPatternAsync(GetAutoFollowPatternRequest request,
+                                          RequestOptions options,
+                                          ActionListener<GetAutoFollowPatternResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(
+            request,
+            CcrRequestConverters::getAutoFollowPattern,
+            options,
+            GetAutoFollowPatternResponse::fromXContent,
+            listener,
+            Collections.emptySet()
+        );
+    }
+
 }
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java
index 8963919bcd1..5bcb0c04d3b 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java
@@ -20,9 +20,11 @@ package org.elasticsearch.client;
 
 import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.client.methods.HttpPut;
 import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest;
+import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
 import org.elasticsearch.client.ccr.PauseFollowRequest;
 import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
 import org.elasticsearch.client.ccr.PutFollowRequest;
@@ -90,4 +92,12 @@ final class CcrRequestConverters {
         return new Request(HttpDelete.METHOD_NAME, endpoint);
     }
 
+    static Request getAutoFollowPattern(GetAutoFollowPatternRequest getAutoFollowPatternRequest) {
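+        // A null pattern name is simply skipped by the endpoint builder, so this resolves to
+        // /_ccr/auto_follow/{name} for a single pattern and /_ccr/auto_follow to get all of them.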
+        String endpoint = new RequestConverters.EndpointBuilder()
+            .addPathPartAsIs("_ccr", "auto_follow")
+            .addPathPart(getAutoFollowPatternRequest.getName())
+            .build();
+        return new Request(HttpGet.METHOD_NAME, endpoint);
+    }
+
 }
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/GetAutoFollowPatternRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/GetAutoFollowPatternRequest.java
new file mode 100644
index 00000000000..364fddb7198
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/GetAutoFollowPatternRequest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.ccr;
+
+import org.elasticsearch.client.Validatable;
+
+import java.util.Objects;
+
+/**
+ * Request class for the get auto follow pattern API.
+ */
+public final class GetAutoFollowPatternRequest implements Validatable {
+
+    private final String name;
+
+    /**
+     * Get all auto follow patterns
+     */
+    public GetAutoFollowPatternRequest() {
+        this.name = null;
+    }
+
+    /**
+     * Get the auto follow pattern with the specified name
+     *
+     * @param name The name of the auto follow pattern to get
+     */
+    public GetAutoFollowPatternRequest(String name) {
+        this.name = Objects.requireNonNull(name);
+    }
+
+    public String getName() {
+        return name;
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponse.java
new file mode 100644
index 00000000000..f4afb2d650e
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponse.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.ccr;
+
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+public final class GetAutoFollowPatternResponse {
+
+    public static GetAutoFollowPatternResponse fromXContent(final XContentParser parser) throws IOException {
+        final Map<String, Pattern> patterns = new HashMap<>();
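+        // The response body is a JSON object keyed by pattern name, so every
+        // top level field is parsed into one Pattern entry.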
+        for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
+            if (token == Token.FIELD_NAME) {
+                final String name = parser.currentName();
+                final Pattern pattern = Pattern.PARSER.parse(parser, null);
+                patterns.put(name, pattern);
+            }
+        }
+        return new GetAutoFollowPatternResponse(patterns);
+    }
+
+    private final Map<String, Pattern> patterns;
+
+    GetAutoFollowPatternResponse(Map<String, Pattern> patterns) {
+        this.patterns = Collections.unmodifiableMap(patterns);
+    }
+
+    public Map<String, Pattern> getPatterns() {
+        return patterns;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        GetAutoFollowPatternResponse that = (GetAutoFollowPatternResponse) o;
+        return Objects.equals(patterns, that.patterns);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(patterns);
+    }
+
+    public static class Pattern extends FollowConfig {
+
+        @SuppressWarnings("unchecked")
+        private static final ConstructingObjectParser<Pattern, Void> PARSER = new ConstructingObjectParser<>(
+            "pattern", args -> new Pattern((String) args[0], (List<String>) args[1], (String) args[2]));
+
+        static {
+            PARSER.declareString(ConstructingObjectParser.constructorArg(), PutFollowRequest.REMOTE_CLUSTER_FIELD);
+            PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD);
+            PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD);
+            PARSER.declareInt(Pattern::setMaxReadRequestOperationCount, FollowConfig.MAX_READ_REQUEST_OPERATION_COUNT);
+            PARSER.declareField(
+                Pattern::setMaxReadRequestSize,
+                (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_READ_REQUEST_SIZE.getPreferredName()),
+                PutFollowRequest.MAX_READ_REQUEST_SIZE,
+                ObjectParser.ValueType.STRING);
+            PARSER.declareInt(Pattern::setMaxOutstandingReadRequests, FollowConfig.MAX_OUTSTANDING_READ_REQUESTS);
+            PARSER.declareInt(Pattern::setMaxWriteRequestOperationCount, FollowConfig.MAX_WRITE_REQUEST_OPERATION_COUNT);
+            PARSER.declareField(
+                Pattern::setMaxWriteRequestSize,
+                (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_REQUEST_SIZE.getPreferredName()),
+                PutFollowRequest.MAX_WRITE_REQUEST_SIZE,
+                ObjectParser.ValueType.STRING);
+            PARSER.declareInt(Pattern::setMaxOutstandingWriteRequests, FollowConfig.MAX_OUTSTANDING_WRITE_REQUESTS);
+            PARSER.declareInt(Pattern::setMaxWriteBufferCount, FollowConfig.MAX_WRITE_BUFFER_COUNT);
+            PARSER.declareField(
+                Pattern::setMaxWriteBufferSize,
+                (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_BUFFER_SIZE.getPreferredName()),
+                PutFollowRequest.MAX_WRITE_BUFFER_SIZE,
+                ObjectParser.ValueType.STRING);
+            PARSER.declareField(
+                Pattern::setMaxRetryDelay,
+                (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.MAX_RETRY_DELAY_FIELD.getPreferredName()),
+                PutFollowRequest.MAX_RETRY_DELAY_FIELD,
+                ObjectParser.ValueType.STRING);
+            PARSER.declareField(
+                Pattern::setReadPollTimeout,
+                (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.READ_POLL_TIMEOUT.getPreferredName()),
+                PutFollowRequest.READ_POLL_TIMEOUT,
+                ObjectParser.ValueType.STRING);
+        }
+
+        private final String remoteCluster;
+        private final List<String> leaderIndexPatterns;
+        private final String followIndexNamePattern;
+
+        Pattern(String remoteCluster, List<String> leaderIndexPatterns, String followIndexNamePattern) {
+            this.remoteCluster = remoteCluster;
+            this.leaderIndexPatterns = leaderIndexPatterns;
+            this.followIndexNamePattern = followIndexNamePattern;
+        }
+
+        public String getRemoteCluster() {
+            return remoteCluster;
+        }
+
+        public List<String> getLeaderIndexPatterns() {
+            return leaderIndexPatterns;
+        }
+
+        public String getFollowIndexNamePattern() {
+            return followIndexNamePattern;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            if (!super.equals(o)) return false;
+            Pattern pattern = (Pattern) o;
+            return Objects.equals(remoteCluster, pattern.remoteCluster) &&
+                Objects.equals(leaderIndexPatterns, pattern.leaderIndexPatterns) &&
+                Objects.equals(followIndexNamePattern, pattern.followIndexNamePattern);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(
+                super.hashCode(),
+                remoteCluster,
+                leaderIndexPatterns,
+                followIndexNamePattern
+            );
+        }
+    }
+
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java
index 00b2d26abaf..391ee1fcd18 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java
@@ -30,6 +30,8 @@ import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest;
+import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
+import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
 import org.elasticsearch.client.ccr.PauseFollowRequest;
 import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
 import org.elasticsearch.client.ccr.PutFollowRequest;
@@ -37,6 +39,7 @@ import org.elasticsearch.client.ccr.PutFollowResponse;
 import org.elasticsearch.client.ccr.ResumeFollowRequest;
 import org.elasticsearch.client.ccr.UnfollowRequest;
 import org.elasticsearch.client.core.AcknowledgedResponse;
+import org.elasticsearch.common.xcontent.ObjectPath;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -48,11 +51,12 @@ import java.util.Map;
 
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
 
 public class CCRIT extends ESRestHighLevelClientTestCase {
 
     @Before
-    public void setupRemoteClusterConfig() throws IOException {
+    public void setupRemoteClusterConfig() throws Exception {
         // Configure local cluster as remote cluster:
         // TODO: replace with nodes info highlevel rest client code when it is available:
         final Request request = new Request("GET", "/_nodes");
@@ -66,6 +70,14 @@ public class CCRIT extends ESRestHighLevelClientTestCase {
         ClusterUpdateSettingsResponse updateSettingsResponse =
             highLevelClient().cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT);
         assertThat(updateSettingsResponse.isAcknowledged(), is(true));
+
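+        // The remote cluster connection is established in the background, so wait for it to report as connected: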
+        assertBusy(() -> {
+            Map<?, ?> localConnection = (Map<?, ?>) toMap(client()
+                .performRequest(new Request("GET", "/_remote/info")))
+                .get("local");
+            assertThat(localConnection, notNullValue());
+            assertThat(localConnection.get("connected"), is(true));
+        });
     }
 
     public void testIndexFollowing() throws Exception {
@@ -129,7 +141,6 @@ public class CCRIT extends ESRestHighLevelClientTestCase {
         assertThat(unfollowResponse.isAcknowledged(), is(true));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35937")
     public void testAutoFollowing() throws Exception {
         CcrClient ccrClient = highLevelClient().ccr();
         PutAutoFollowPatternRequest putAutoFollowPatternRequest =
@@ -146,8 +157,26 @@ public class CCRIT extends ESRestHighLevelClientTestCase {
 
         assertBusy(() -> {
             assertThat(indexExists("copy-logs-20200101"), is(true));
+            // TODO: replace with HLRC follow stats when available:
+            Map<String, Object> rsp = toMap(client().performRequest(new Request("GET", "/copy-logs-20200101/_ccr/stats")));
+            String index = null;
+            try {
+                index = ObjectPath.eval("indices.0.index", rsp);
+            } catch (Exception e) {
+                // ignored: the stats may not list the index yet, assertBusy will retry
+            }
+            assertThat(index, equalTo("copy-logs-20200101"));
         });
 
+        GetAutoFollowPatternRequest getAutoFollowPatternRequest =
+            randomBoolean() ? new GetAutoFollowPatternRequest("pattern1") : new GetAutoFollowPatternRequest();
+        GetAutoFollowPatternResponse getAutoFollowPatternResponse =
+            execute(getAutoFollowPatternRequest, ccrClient::getAutoFollowPattern, ccrClient::getAutoFollowPatternAsync);
+        assertThat(getAutoFollowPatternResponse.getPatterns().size(), equalTo(1));
+        GetAutoFollowPatternResponse.Pattern pattern = getAutoFollowPatternResponse.getPatterns().get("pattern1");
+        assertThat(pattern, notNullValue());
+        assertThat(pattern.getRemoteCluster(), equalTo(putAutoFollowPatternRequest.getRemoteCluster()));
+        assertThat(pattern.getLeaderIndexPatterns(), equalTo(putAutoFollowPatternRequest.getLeaderIndexPatterns()));
+        assertThat(pattern.getFollowIndexNamePattern(), equalTo(putAutoFollowPatternRequest.getFollowIndexNamePattern()));
+
         // Cleanup:
         final DeleteAutoFollowPatternRequest deleteAutoFollowPatternRequest = new DeleteAutoFollowPatternRequest("pattern1");
         AcknowledgedResponse deleteAutoFollowPatternResponse =
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java
new file mode 100644
index 00000000000..64eb9ba4f9f
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.ccr;
+
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD;
+import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD;
+import static org.elasticsearch.client.ccr.PutFollowRequest.REMOTE_CLUSTER_FIELD;
+import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
+
+public class GetAutoFollowPatternResponseTests extends ESTestCase {
+
+    public void testFromXContent() throws IOException {
+        xContentTester(this::createParser,
+            this::createTestInstance,
+            GetAutoFollowPatternResponseTests::toXContent,
+            GetAutoFollowPatternResponse::fromXContent)
+            .supportsUnknownFields(false)
+            .test();
+    }
+
+    private GetAutoFollowPatternResponse createTestInstance() {
+        int numPatterns = randomIntBetween(0, 16);
+        Map<String, GetAutoFollowPatternResponse.Pattern> patterns = new HashMap<>(numPatterns);
+        for (int i = 0; i < numPatterns; i++) {
+            GetAutoFollowPatternResponse.Pattern pattern = new GetAutoFollowPatternResponse.Pattern(
+                randomAlphaOfLength(4), Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4));
+            if (randomBoolean()) {
+                pattern.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE));
+            }
+            if (randomBoolean()) {
+                pattern.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE));
+            }
+            if (randomBoolean()) {
+                pattern.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
+            }
+            if (randomBoolean()) {
+                pattern.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong()));
+            }
+            if (randomBoolean()) {
+                pattern.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE));
+            }
+            if (randomBoolean()) {
+                pattern.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong()));
+            }
+            if (randomBoolean()) {
+                pattern.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
+            }
+            if (randomBoolean()) {
+                pattern.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong()));
+            }
+            if (randomBoolean()) {
+                pattern.setMaxRetryDelay(new TimeValue(randomNonNegativeLong()));
+            }
+            if (randomBoolean()) {
+                pattern.setReadPollTimeout(new TimeValue(randomNonNegativeLong()));
+            }
+            patterns.put(randomAlphaOfLength(4), pattern);
+        }
+        return new GetAutoFollowPatternResponse(patterns);
+    }
+
+    public static void toXContent(GetAutoFollowPatternResponse response, XContentBuilder builder) throws IOException {
+        builder.startObject();
+        {
+            for (Map.Entry<String, GetAutoFollowPatternResponse.Pattern> entry : response.getPatterns().entrySet()) {
+                builder.startObject(entry.getKey());
+                GetAutoFollowPatternResponse.Pattern pattern = entry.getValue();
+                builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), pattern.getRemoteCluster());
+                builder.field(LEADER_PATTERNS_FIELD.getPreferredName(), pattern.getLeaderIndexPatterns());
+                if (pattern.getFollowIndexNamePattern() != null) {
+                    builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), pattern.getFollowIndexNamePattern());
+                }
+                entry.getValue().toXContentFragment(builder, ToXContent.EMPTY_PARAMS);
+                builder.endObject();
+            }
+        }
+        builder.endObject();
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java
index 1d1aef514ca..fea6ad0fbd7 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java
@@ -34,6 +34,9 @@ import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestHighLevelClient;
 import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest;
+import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
+import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
+import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse.Pattern;
 import org.elasticsearch.client.ccr.PauseFollowRequest;
 import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
 import org.elasticsearch.client.ccr.PutFollowRequest;
@@ -501,6 +504,70 @@ public class CCRDocumentationIT extends ESRestHighLevelClientTestCase {
         assertTrue(latch.await(30L, TimeUnit.SECONDS));
     }
 
+    public void testGetAutoFollowPattern() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+
+        // Put auto follow pattern, so that we can get it:
+        {
+            final PutAutoFollowPatternRequest putRequest =
+                new PutAutoFollowPatternRequest("my_pattern", "local", Collections.singletonList("logs-*"));
+            AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
+            assertThat(putResponse.isAcknowledged(), is(true));
+        }
+
+        // tag::ccr-get-auto-follow-pattern-request
+        GetAutoFollowPatternRequest request =
+            new GetAutoFollowPatternRequest("my_pattern"); // <1>
+        // end::ccr-get-auto-follow-pattern-request
+
+        // tag::ccr-get-auto-follow-pattern-execute
+        GetAutoFollowPatternResponse response = client.ccr()
+            .getAutoFollowPattern(request, RequestOptions.DEFAULT);
+        // end::ccr-get-auto-follow-pattern-execute
+
+        // tag::ccr-get-auto-follow-pattern-response
+        Map<String, Pattern> patterns = response.getPatterns();
+        Pattern pattern = patterns.get("my_pattern"); // <1>
+        pattern.getLeaderIndexPatterns();
+        // end::ccr-get-auto-follow-pattern-response
+
+        // tag::ccr-get-auto-follow-pattern-execute-listener
+        ActionListener<GetAutoFollowPatternResponse> listener =
+            new ActionListener<GetAutoFollowPatternResponse>() {
+                @Override
+                public void onResponse(GetAutoFollowPatternResponse
+                    response) { // <1>
+                    Map<String, Pattern> patterns = response.getPatterns();
+                    Pattern pattern = patterns.get("my_pattern");
+                    pattern.getLeaderIndexPatterns();
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+        // end::ccr-get-auto-follow-pattern-execute-listener
+
+        // Replace the empty listener by a blocking listener in test
+        final CountDownLatch latch = new CountDownLatch(1);
+        listener = new LatchedActionListener<>(listener, latch);
+
+        // tag::ccr-get-auto-follow-pattern-execute-async
+        client.ccr().getAutoFollowPatternAsync(request,
+            RequestOptions.DEFAULT, listener); // <1>
+        // end::ccr-get-auto-follow-pattern-execute-async
+
+        assertTrue(latch.await(30L, TimeUnit.SECONDS));
+
+        // Cleanup:
+        {
+            DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
+            AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
+            assertThat(deleteResponse.isAcknowledged(), is(true));
+        }
+    }
+
        return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false);
    }
diff --git a/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc b/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc
new file mode 100644
index 00000000000..61ab8d58e9c
--- /dev/null
+++ b/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc
@@ -0,0 +1,35 @@
+--
+:api: ccr-get-auto-follow-pattern
+:request: GetAutoFollowPatternRequest
+:response: GetAutoFollowPatternResponse
+--
+
+[id="{upid}-{api}"]
+=== Get Auto Follow Pattern API
+
+[id="{upid}-{api}-request"]
+==== Request
+
+The Get Auto Follow Pattern API allows you to get a specified auto follow pattern
+or all auto follow patterns.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-request]
+--------------------------------------------------
+<1> The name of the auto follow pattern to get.
+    Use the default constructor to get all auto follow patterns.
+
+[id="{upid}-{api}-response"]
+==== Response
+
+The returned +{response}+ includes the requested auto follow pattern, or
+all auto follow patterns if the default constructor of the request class was used.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-response]
+--------------------------------------------------
+<1> Get the requested pattern from the list of returned patterns.
+
+include::../execution.asciidoc[]
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index eb2f0b98181..661ce78fe80 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -472,6 +472,7 @@ The Java High Level REST Client supports the following CCR APIs:
 * <<{upid}-ccr-unfollow>>
 * <<{upid}-ccr-put-auto-follow-pattern>>
 * <<{upid}-ccr-delete-auto-follow-pattern>>
+* <<{upid}-ccr-get-auto-follow-pattern>>
 
 include::ccr/put_follow.asciidoc[]
 include::ccr/pause_follow.asciidoc[]
@@ -479,6 +480,7 @@ include::ccr/resume_follow.asciidoc[]
 include::ccr/unfollow.asciidoc[]
 include::ccr/put_auto_follow_pattern.asciidoc[]
 include::ccr/delete_auto_follow_pattern.asciidoc[]
+include::ccr/get_auto_follow_pattern.asciidoc[]
 
 == Index Lifecycle Management APIs
diff --git a/docs/painless/painless-contexts/painless-score-context.asciidoc b/docs/painless/painless-contexts/painless-score-context.asciidoc
index bd1e1de7f77..2bec9021c17 100644
--- a/docs/painless/painless-contexts/painless-score-context.asciidoc
+++ b/docs/painless/painless-contexts/painless-score-context.asciidoc
@@ -11,8 +11,10 @@ score to documents returned from a query.
     User-defined parameters passed in as part of the query.
 
 `doc` (`Map`, read-only)::
-    Contains the fields of the current document where each field is a
-    `List` of values.
+    Contains the fields of the current document. For single-valued fields,
+    the value can be accessed via `doc['fieldname'].value`. For multi-valued
+    fields, this returns the first value; other values can be accessed
+    via `doc['fieldname'].get(index)`.
 
 `_score` (`double` read-only)::
     The similarity score of the current document.
@@ -24,4 +26,33 @@ score to documents returned from a query.
 
 *API*
 
-The standard <> is available. 
\ No newline at end of file
+The standard <> is available.
+
+*Example*
+
+To run this example, first follow the steps in
+<>.
+ +The following query finds all unsold seats, with lower 'row' values +scored higher. + +[source,js] +-------------------------------------------------- +GET /seats/_search +{ + "query": { + "function_score": { + "query": { + "match": { "sold": "false" } + }, + "script_score" : { + "script" : { + "source": "1.0 / doc['row'].value" + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:seats] \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-similarity-context.asciidoc b/docs/painless/painless-contexts/painless-similarity-context.asciidoc index 53b37be52b6..1d847f516c8 100644 --- a/docs/painless/painless-contexts/painless-similarity-context.asciidoc +++ b/docs/painless/painless-contexts/painless-similarity-context.asciidoc @@ -15,6 +15,9 @@ documents in a query. `params` (`Map`, read-only):: User-defined parameters passed in at query-time. +`weight` (`float`, read-only):: + The weight as calculated by a <> + `query.boost` (`float`, read-only):: The boost value if provided by the query. If this is not provided the value is `1.0f`. @@ -37,12 +40,23 @@ documents in a query. The total occurrences of the current term in the index. `doc.length` (`long`, read-only):: - The number of tokens the current document has in the current field. + The number of tokens the current document has in the current field. This + is decoded from the stored {ref}/norms.html[norms] and may be approximate for + long fields `doc.freq` (`long`, read-only):: The number of occurrences of the current term in the current document for the current field. +Note that the `query`, `field`, and `term` variables are also available to the +<>. They are more efficiently used +there, as they are constant for all documents. + +For queries that contain multiple terms, the script is called once for each +term with that term's calculated weight, and the results are summed. Note that some +terms might have a `doc.freq` value of `0` on a document, for example if a query +uses synonyms. + *Return* `double`:: diff --git a/docs/painless/painless-contexts/painless-sort-context.asciidoc b/docs/painless/painless-contexts/painless-sort-context.asciidoc index 9efd5076688..64c17ad07a6 100644 --- a/docs/painless/painless-contexts/painless-sort-context.asciidoc +++ b/docs/painless/painless-contexts/painless-sort-context.asciidoc @@ -10,8 +10,10 @@ Use a Painless script to User-defined parameters passed in as part of the query. `doc` (`Map`, read-only):: - Contains the fields of the current document where each field is a - `List` of values. + Contains the fields of the current document. For single-valued fields, + the value can be accessed via `doc['fieldname'].value`. For multi-valued + fields, this returns the first value; other values can be accessed + via `doc['fieldname'].get(index)` `_score` (`double` read-only):: The similarity score of the current document. @@ -23,4 +25,37 @@ Use a Painless script to *API* -The standard <> is available. \ No newline at end of file +The standard <> is available. + +*Example* + +To run this example, first follow the steps in +<>. 
+
+To sort results by the length of the `theatre` field, submit the following query:
+
+[source,js]
+----
+GET /_search
+{
+  "query" : {
+    "term" : { "sold" : "true" }
+  },
+  "sort" : {
+    "_script" : {
+      "type" : "number",
+      "script" : {
+        "lang": "painless",
+        "source": "doc['theatre'].value.length() * params.factor",
+        "params" : {
+          "factor" : 1.1
+        }
+      },
+      "order" : "asc"
+    }
+  }
+}
+
+----
+// CONSOLE
+// TEST[setup:seats]
\ No newline at end of file
diff --git a/docs/painless/painless-contexts/painless-weight-context.asciidoc b/docs/painless/painless-contexts/painless-weight-context.asciidoc
index ad215d5386b..319b7999aa8 100644
--- a/docs/painless/painless-contexts/painless-weight-context.asciidoc
+++ b/docs/painless/painless-contexts/painless-weight-context.asciidoc
@@ -3,8 +3,11 @@
 Use a Painless script to create a
 {ref}/index-modules-similarity.html[weight] for use in a
-<>. Weight is used to prevent
-recalculation of constants that remain the same across documents.
+<>. The weight makes up the
+part of the similarity calculation that is independent of the document being
+scored, and so can be built up front and cached.
+
+Queries that contain multiple terms calculate a separate weight for each term.
 
 *Variables*
diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc
index 3cd21b21df4..11de3e5a27f 100644
--- a/docs/reference/docs/termvectors.asciidoc
+++ b/docs/reference/docs/termvectors.asciidoc
@@ -27,9 +27,6 @@ or by adding the requested fields in the request body (see
 example below). Fields can also be specified with wildcards
 in a similar way to the <>
 
-[WARNING]
-Note that the usage of `/_termvector` is deprecated in 2.0, and replaced by `/_termvectors`.
-
 [float]
 === Return values
diff --git a/docs/reference/mapping/types/parent-join.asciidoc b/docs/reference/mapping/types/parent-join.asciidoc
index 055109a4ce2..d2d68fc1359 100644
--- a/docs/reference/mapping/types/parent-join.asciidoc
+++ b/docs/reference/mapping/types/parent-join.asciidoc
@@ -46,7 +46,7 @@ PUT my_index/_doc/1?refresh
 
 PUT my_index/_doc/2?refresh
 {
-  "text": "This is a another question",
+  "text": "This is another question",
   "my_join_field": {
     "name": "question"
   }
@@ -417,7 +417,7 @@ The mapping above represents the following tree:
     | vote
 
-Indexing a grand child document requires a `routing` value equals
+Indexing a grandchild document requires a `routing` value equal
 to the grand-parent (the greater parent of the lineage):
 
@@ -436,4 +436,4 @@ PUT my_index/_doc/3?routing=1&refresh <1>
 // TEST[continued]
 
 <1> This child document must be on the same shard as its grand-parent and parent
-<2> The parent id of this document (must points to an `answer` document)
\ No newline at end of file
+<2> The parent id of this document (must point to an `answer` document)
diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc
index a543ef4b054..83370a93d55 100644
--- a/docs/reference/migration/migrate_7_0/api.asciidoc
+++ b/docs/reference/migration/migrate_7_0/api.asciidoc
@@ -119,3 +119,10 @@ while now an exception is thrown.
 
 The deprecated graph endpoints (those with `/_graph/_explore`) have been
 removed.
+
+
+[float]
+==== Deprecated `_termvector` endpoint removed
+
+The `_termvector` endpoint was deprecated in 2.0 and has now been removed.
+The endpoint `_termvectors` (plural) should be used instead.
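+
+For instance (a sketch with a hypothetical index and document id), a request that
+previously used the deprecated endpoint:
+
+[source,js]
+--------------------------------------------------
+GET /twitter/_doc/1/_termvector
+--------------------------------------------------
+// NOTCONSOLE
+
+should now use the plural form:
+
+[source,js]
+--------------------------------------------------
+GET /twitter/_doc/1/_termvectors
+--------------------------------------------------
+// NOTCONSOLE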
diff --git a/docs/reference/migration/migrate_7_0/java.asciidoc b/docs/reference/migration/migrate_7_0/java.asciidoc
index 4357b3fa728..e48a4cf1b45 100644
--- a/docs/reference/migration/migrate_7_0/java.asciidoc
+++ b/docs/reference/migration/migrate_7_0/java.asciidoc
@@ -32,4 +32,10 @@ was moved to
 `org.elasticsearch.search.aggregations.PipelineAggregationBuilders`
 
 ==== `Retry.withBackoff` methods with `Settings` removed
 
 The variants of `Retry.withBackoff` that included `Settings` have been removed
-because `Settings` is no longer needed.
\ No newline at end of file
+because `Settings` is no longer needed.
+
+[float]
+==== Deprecated method `Client#termVector` removed
+
+The client method `termVector`, deprecated in 2.0, has been removed. The method
+`termVectors` (plural) should be used instead.
\ No newline at end of file
diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc
index 4b96fe0e706..f46a4a91e7f 100644
--- a/docs/reference/search/request/scroll.asciidoc
+++ b/docs/reference/search/request/scroll.asciidoc
@@ -125,6 +125,11 @@ TIP: Keeping older segments alive means that more file handles are needed.
 Ensure that you have configured your nodes to have ample free file handles.
 See <>.
 
+NOTE: To protect against issues caused by having too many scrolls open, the
+user is not allowed to open scrolls past a certain limit. By default, the
+maximum number of open scrolls is 500. This limit can be updated with the
+`search.max_open_scroll_context` cluster setting.
+
 You can check how many search contexts are open with the
 <>:
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml
new file mode 100644
index 00000000000..f9d5c079856
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/70_mix_typeless_typeful.yml
@@ -0,0 +1,34 @@
+---
+"bulk without types on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: Typeless APIs were introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "keyword"
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - index:
+              _index: index
+              _id: 0
+          - foo: bar
+          - index:
+              _index: index
+              _id: 1
+          - foo: bar
+
+  - do:
+      count:
+        index: index
+
+  - match: {count: 2}
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml
new file mode 100644
index 00000000000..22df4f5dc43
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/70_mix_typeless_typeful.yml
@@ -0,0 +1,42 @@
+---
+"DELETE with typeless API on an index that has types":
+
+  - skip:
+      version: " - 6.99.99"
+      reason: Typeless APIs were introduced in 7.0.0
+
+  - do:
+      indices.create: # not using include_type_name: false on purpose
+        index: index
+        body:
+          mappings:
+            not_doc:
+              properties:
+                foo:
+                  type: "keyword"
+
+  - do:
+      index:
+        index: index
+        type: not_doc
+        id: 1
+        body: { foo: bar }
+
+  - do:
+      catch: bad_request
+      delete:
+        index: index
+        type: some_random_type
+        id: 1
+
+  - match: { error.root_cause.0.reason: "/Rejecting.mapping.update.to.\\[index\\].as.the.final.mapping.would.have.more.than.1.type.*/" }
+
+  - do:
+      delete:
+        index: index
+        id: 1
+
+  - match: { _index: "index" }
+  - match: { _type: "_doc" }
+  - 
match: { _id: "1"} + - match: { _version: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/40_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/40_mix_typeless_typeful.yml new file mode 100644 index 00000000000..baefba7c312 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/40_mix_typeless_typeful.yml @@ -0,0 +1,56 @@ +--- +"Explain with typeless API on an index that has types": + + - skip: + version: " - 6.99.99" + reason: Typeless APIs were introduced in 7.0.0 + + - do: + indices.create: # not using include_type_name: false on purpose + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + index: + index: index + type: not_doc + id: 1 + body: { foo: bar } + + - do: + indices.refresh: {} + + - do: + catch: missing + explain: + index: index + type: some_random_type + id: 1 + body: + query: + match_all: {} + + - match: { _index: "index" } + - match: { _type: "some_random_type" } + - match: { _id: "1"} + - match: { matched: false} + + - do: + explain: + index: index + type: _doc #todo: make _explain typeless and remove this + id: 1 + body: + query: + match_all: {} + + - match: { _index: "index" } + - match: { _type: "_doc" } + - match: { _id: "1"} + - is_true: matched + - match: { explanation.value: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml new file mode 100644 index 00000000000..71907461da3 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/100_mix_typeless_typeful.yml @@ -0,0 +1,46 @@ +--- +"GET with typeless API on an index that has types": + + - skip: + version: " - 6.99.99" + reason: Typeless APIs were introduced in 7.0.0 + + - do: + indices.create: # not using include_type_name: false on purpose + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + index: + index: index + type: not_doc + id: 1 + body: { foo: bar } + + - do: + catch: missing + get: + index: index + type: some_random_type + id: 1 + + - match: { _index: "index" } + - match: { _type: "some_random_type" } + - match: { _id: "1"} + - match: { found: false} + + - do: + get: + index: index + id: 1 + + - match: { _index: "index" } + - match: { _type: "_doc" } + - match: { _id: "1"} + - match: { _version: 1} + - match: { _source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml new file mode 100644 index 00000000000..5e225ec1ad3 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/70_mix_typeless_typeful.yml @@ -0,0 +1,62 @@ +--- +"Index with typeless API on an index that has types": + + - skip: + version: " - 6.99.99" + reason: Typeless APIs were introduced in 7.0.0 + + - do: + indices.create: # not using include_type_name: false on purpose + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + index: + index: index + id: 1 + body: { foo: bar } + + - match: { _index: "index" } + - match: { _type: "_doc" } + - match: { _id: "1"} + - match: { _version: 1} + + - do: + get: # not using typeless API on purpose + index: index + type: not_doc + id: 1 + + - match: { _index: "index" } + - match: { _type: "not_doc" } # the important bit to check + - match: { _id: "1"} + - match: { 
_version: 1} + - match: { _source: { foo: bar }} + + + - do: + index: + index: index + body: { foo: bar } + + - match: { _index: "index" } + - match: { _type: "_doc" } + - match: { _version: 1} + - set: { _id: id } + + - do: + get: # using typeful API on purpose + index: index + type: not_doc + id: '$id' + + - match: { _index: "index" } + - match: { _type: "not_doc" } # the important bit to check + - match: { _id: $id} + - match: { _version: 1} + - match: { _source: { foo: bar }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_mix_typeless_typeful.yml new file mode 100644 index 00000000000..89e0d42a9e7 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/70_mix_typeless_typeful.yml @@ -0,0 +1,23 @@ +--- +"GET mapping with typeless API on an index that has types": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + + - do: + indices.create: # not using include_type_name: false on purpose + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + indices.get_mapping: + include_type_name: false + index: index + + - match: { index.mappings.properties.foo.type: "keyword" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml new file mode 100644 index 00000000000..5f9efb1a375 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_mix_typeless_typeful.yml @@ -0,0 +1,52 @@ +--- +"PUT mapping with typeless API on an index that has types": + + - skip: + version: " - 6.99.99" + reason: include_type_name was introduced in 7.0.0 + + - do: + indices.create: # not using include_type_name: false on purpose + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + indices.put_mapping: + include_type_name: false + index: index + body: + properties: + bar: + type: "long" + + - do: + indices.get_mapping: + include_type_name: false + index: index + + - match: { index.mappings.properties.foo.type: "keyword" } + - match: { index.mappings.properties.bar.type: "long" } + + - do: + indices.put_mapping: + include_type_name: false + index: index + body: + properties: + foo: + type: "keyword" # also test no-op updates that trigger special logic wrt the mapping version + + - do: + catch: bad_request + indices.put_mapping: + index: index + body: + some_other_type: + properties: + bar: + type: "long" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/30_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/30_mix_typeless_typeful.yml new file mode 100644 index 00000000000..24bb8a7d34f --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mtermvectors/30_mix_typeless_typeful.yml @@ -0,0 +1,32 @@ +--- +"mtermvectors without types on an index that has types": + + - skip: + version: " - 6.99.99" + reason: Typeless APIs were introduced in 7.0.0 + + - do: + indices.create: # not using include_type_name: false on purpose + index: index + body: + mappings: + not_doc: + properties: + foo: + type : "text" + term_vector : "with_positions_offsets" + + - do: + index: + index: index + id: 1 + body: { foo: bar } + + - do: + mtermvectors: + body: + docs: + - _index: 
index + _id: 1 + + - match: {docs.0.term_vectors.foo.terms.bar.term_freq: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml new file mode 100644 index 00000000000..403f2b5b8cf --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/termvectors/50_mix_typeless_typeful.yml @@ -0,0 +1,45 @@ +--- +"Term vectors with typeless API on an index that has types": + + - skip: + version: " - 6.99.99" + reason: Typeless APIs were introduced in 7.0.0 + + - do: + indices.create: # not using include_type_name: false on purpose + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "text" + term_vector: "with_positions" + + - do: + index: + index: index + type: not_doc + id: 1 + body: { foo: bar } + + - do: + indices.refresh: {} + + - do: + termvectors: + index: index + type: _doc # todo: remove when termvectors support typeless API + id: 1 + + - is_true: found + - match: {_type: _doc} + - match: {term_vectors.foo.terms.bar.term_freq: 1} + + - do: + termvectors: + index: index + type: some_random_type + id: 1 + + - is_false: found diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml new file mode 100644 index 00000000000..066f0989c35 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/90_mix_typeless_typeful.yml @@ -0,0 +1,39 @@ +--- +"Update with typeless API on an index that has types": + + - skip: + version: " - 6.99.99" + reason: Typeless APIs were introduced in 7.0.0 + + - do: + indices.create: # not using include_type_name: false on purpose + index: index + body: + mappings: + not_doc: + properties: + foo: + type: "keyword" + + - do: + index: + index: index + type: not_doc + id: 1 + body: { foo: bar } + + - do: + update: + index: index + id: 1 + body: + doc: + foo: baz + + - do: + get: + index: index + type: not_doc + id: 1 + + - match: { _source.foo: baz } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index e08a8d4b327..651fba0b77e 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -36,6 +36,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Objects; public class Version implements Comparable, ToXContentFragment { /* @@ -192,7 +193,30 @@ public class Version implements Comparable, ToXContentFragment { case V_EMPTY_ID: return V_EMPTY; default: - return new Version(id, org.apache.lucene.util.Version.LATEST); + // We need at least the major of the Lucene version to be correct. + // Our best guess is to use the same Lucene version as the previous + // version in the list, assuming that it didn't change. This is at + // least correct for patch versions of known minors since we never + // update the Lucene dependency for patch versions. 
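+            // For example (hypothetical undeclared id): the binary search below returns
+            // a negative insertion point for an id that is not declared; (-2 - index)
+            // then selects the greatest declared version that is older than the unknown
+            // id, and we reuse its Lucene version.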
+            List<Version> versions = DeclaredVersionsHolder.DECLARED_VERSIONS;
+            Version tmp = new Version(id, org.apache.lucene.util.Version.LATEST);
+            int index = Collections.binarySearch(versions, tmp);
+            if (index < 0) {
+                index = -2 - index;
+            } else {
+                assert false : "Version [" + tmp + "] is declared but absent from the switch statement in Version#fromId";
+            }
+            final org.apache.lucene.util.Version luceneVersion;
+            if (index == -1) {
+                // this version is older than any supported version, so we
+                // assume it is the previous major to the oldest Lucene version
+                // that we know about
+                luceneVersion = org.apache.lucene.util.Version.fromBits(
+                    versions.get(0).luceneVersion.major - 1, 0, 0);
+            } else {
+                luceneVersion = versions.get(index).luceneVersion;
+            }
+            return new Version(id, luceneVersion);
         }
     }
 
@@ -300,7 +324,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
         this.minor = (byte) ((id / 10000) % 100);
         this.revision = (byte) ((id / 100) % 100);
         this.build = (byte) (id % 100);
-        this.luceneVersion = luceneVersion;
+        this.luceneVersion = Objects.requireNonNull(luceneVersion);
     }
 
     public boolean after(Version version) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java
index 1211e03ed79..9b9c0ca7b16 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java
@@ -27,7 +27,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -77,14 +76,14 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction<TypesExistsRequest, TypesExistsResponse>
-        ImmutableOpenMap<String, MappingMetaData> mappings = state.metaData().getIndices().get(concreteIndex).getMappings();
-        if (mappings.isEmpty()) {
+        MappingMetaData mapping = state.metaData().getIndices().get(concreteIndex).mapping();
+        if (mapping == null) {
             listener.onResponse(new TypesExistsResponse(false));
             return;
         }
 
         for (String type : request.types()) {
-            if (!mappings.containsKey(type)) {
+            if (mapping.type().equals(type) == false) {
                 listener.onResponse(new TypesExistsResponse(false));
                 return;
             }
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
index 8e084c1ceac..66697cb907d 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java
@@ -334,8 +334,7 @@ public class TransportBulkAction extends HandledTransportAction termVector(TermVectorsRequest request);
-
-    /**
-     * An action that returns the term vectors for a specific document.
-     *
-     * @param request The term vector request
-     */
-    @Deprecated
-    void termVector(TermVectorsRequest request, ActionListener<TermVectorsResponse> listener);
-
-    /**
-     * Builder for the term vector request.
-     */
-    @Deprecated
-    TermVectorsRequestBuilder prepareTermVector();
-
-    /**
-     * Builder for the term vector request.
-     *
-     * @param index The index to load the document from
-     * @param type The type of the document
-     * @param id The id of the document
-     */
-    @Deprecated
-    TermVectorsRequestBuilder prepareTermVector(String index, String type, String id);
-
     /**
      * Multi get term vectors.
      */
diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java
index d6ce6089017..d642101e1c3 100644
--- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java
+++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java
@@ -581,30 +581,6 @@ public abstract class AbstractClient implements Client {
         return new TermVectorsRequestBuilder(this, TermVectorsAction.INSTANCE, index, type, id);
     }
 
-    @Deprecated
-    @Override
-    public ActionFuture<TermVectorsResponse> termVector(final TermVectorsRequest request) {
-        return termVectors(request);
-    }
-
-    @Deprecated
-    @Override
-    public void termVector(final TermVectorsRequest request, final ActionListener<TermVectorsResponse> listener) {
-        termVectors(request, listener);
-    }
-
-    @Deprecated
-    @Override
-    public TermVectorsRequestBuilder prepareTermVector() {
-        return prepareTermVectors();
-    }
-
-    @Deprecated
-    @Override
-    public TermVectorsRequestBuilder prepareTermVector(String index, String type, String id) {
-        return prepareTermVectors(index, type, id);
-    }
-
     @Override
     public ActionFuture<MultiTermVectorsResponse> multiTermVectors(final MultiTermVectorsRequest request) {
         return execute(MultiTermVectorsAction.INSTANCE, request);
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
index b1b092e0086..5a143d16d9d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
@@ -449,13 +449,37 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragment
         return this.aliases;
     }
 
+    /**
+     * Return an object that maps each type to the associated mappings.
+     * The return value is never {@code null} but may be empty if the index
+     * has no mappings.
+     * @deprecated Use {@link #mapping()} instead now that indices have a single type
+     */
+    @Deprecated
     public ImmutableOpenMap<String, MappingMetaData> getMappings() {
         return mappings;
     }
 
+    /**
+     * Return the concrete mapping for this index or {@code null} if this index has no mappings at all.
+     */
     @Nullable
-    public MappingMetaData mapping(String mappingType) {
-        return mappings.get(mappingType);
+    public MappingMetaData mapping() {
+        for (ObjectObjectCursor<String, MappingMetaData> cursor : mappings) {
+            if (cursor.key.equals(MapperService.DEFAULT_MAPPING) == false) {
+                return cursor.value;
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Get the default mapping.
+     * NOTE: this is always {@code null} for 7.x indices, which are not allowed to have a default mapping.
+     */
+    @Nullable
+    public MappingMetaData defaultMapping() {
+        return mappings.get(MapperService.DEFAULT_MAPPING);
+    }
 
     public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid";
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index 36fa6c6e6fb..6901c92c63a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -742,13 +742,12 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, ToXContentFragment
 
     /**
      * @param concreteIndex The concrete index to check if routing is required
-     * @param type The type to check if routing is required
      * @return Whether routing is required according to the mapping for the specified index
      */
-    public boolean routingRequired(String concreteIndex, String type) {
+    public boolean routingRequired(String concreteIndex) {
         IndexMetaData indexMetaData = indices.get(concreteIndex);
         if (indexMetaData != null) {
-            MappingMetaData mappingMetaData = indexMetaData.getMappings().get(type);
+            MappingMetaData mappingMetaData = indexMetaData.mapping();
             if (mappingMetaData != null) {
                 return mappingMetaData.routing().required();
             }
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 1832d735241..002ed86da34 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -263,7 +263,7 @@ public class MetaDataMappingService {
                 updateList.add(indexMetaData);
                 // try and parse it (no need to add it here) so we can bail early in case of parsing exception
                 DocumentMapper newMapper;
-                DocumentMapper existingMapper = mapperService.documentMapper(request.type());
+                DocumentMapper existingMapper = mapperService.documentMapper();
                 if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
                     // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
                     newMapper = mapperService.parse(request.type(), mappingUpdateSource, false);
@@ -295,12 +295,22 @@ public class MetaDataMappingService {
                 // we use the exact same indexService and metadata we used to validate above here to actually apply the update
                 final Index index = indexMetaData.getIndex();
                 final MapperService mapperService = indexMapperServices.get(index);
+                String typeForUpdate = mappingType; // the type to use to apply the mapping update
+                if (MapperService.SINGLE_MAPPING_NAME.equals(typeForUpdate)) {
+                    // If the user gave _doc as a special type value, or is using the new typeless APIs,
+                    // then we apply the mapping update to the existing type. This allows moving to typeless
+                    // APIs with indices whose type name is different from `_doc`.
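+                    // For example, a typeless put-mapping request against an index whose only
+                    // type is `my_type` is applied as an update to `my_type` rather than
+                    // attempting to create a second type `_doc`.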
+ DocumentMapper mapper = mapperService.documentMapper(); + if (mapper != null) { + typeForUpdate = mapper.type(); + } + } CompressedXContent existingSource = null; - DocumentMapper existingMapper = mapperService.documentMapper(mappingType); + DocumentMapper existingMapper = mapperService.documentMapper(typeForUpdate); if (existingMapper != null) { existingSource = existingMapper.mappingSource(); } - DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE); + DocumentMapper mergedMapper = mapperService.merge(typeForUpdate, mappingUpdateSource, MergeReason.MAPPING_UPDATE); CompressedXContent updatedSource = mergedMapper.mappingSource(); if (existingSource != null) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 0a6335ebc49..5ed73d9c48b 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -397,6 +397,7 @@ public final class ClusterSettings extends AbstractScopedSettings { SearchService.MAX_KEEPALIVE_SETTING, MultiBucketConsumerService.MAX_BUCKET_SETTING, SearchService.LOW_LEVEL_CANCELLATION_SETTING, + SearchService.MAX_OPEN_SCROLL_CONTEXT, Node.WRITE_PORTS_FILE_SETTING, Node.NODE_NAME_SETTING, Node.NODE_DATA_SETTING, diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 5dec98b950c..e29b6ca362d 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -2108,8 +2108,9 @@ public class InternalEngine extends Engine { // Give us the opportunity to upgrade old segments while performing // background merges MergePolicy mergePolicy = config().getMergePolicy(); + // always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes. 
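+        // (an IndexWriter that is not configured with the soft-deletes field would
+        // otherwise treat previously soft-deleted documents as live)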
+ iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD); if (softDeleteEnabled) { - iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD); mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME, softDeletesPolicy::getRetentionQuery, new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy)); } diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 3b84b00b0f8..fc1796dfcc5 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -39,10 +39,12 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor; import org.elasticsearch.index.fieldvisitor.FieldsVisitor; import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -157,13 +159,11 @@ public final class ShardGetService extends AbstractIndexShardComponent { Engine.GetResult get = null; if (type != null) { - Term uidTerm = mapperService.createUidTerm(type, id); - if (uidTerm != null) { - get = indexShard.get(new Engine.Get(realtime, readFromTranslog, type, id, uidTerm) - .version(version).versionType(versionType)); - if (get.exists() == false) { - get.close(); - } + Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(id)); + get = indexShard.get(new Engine.Get(realtime, readFromTranslog, type, id, uidTerm) + .version(version).versionType(versionType)); + if (get.exists() == false) { + get.close(); } } @@ -202,7 +202,7 @@ public final class ShardGetService extends AbstractIndexShardComponent { } } - DocumentMapper docMapper = mapperService.documentMapper(type); + DocumentMapper docMapper = mapperService.documentMapper(); if (gFields != null && gFields.length > 0) { for (String field : gFields) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 86674617272..14e8ad74188 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -106,7 +106,8 @@ final class DocumentParser { throw new IllegalArgumentException("It is forbidden to index into the default mapping [" + MapperService.DEFAULT_MAPPING + "]"); } - if (Objects.equals(source.type(), docMapper.type()) == false) { + if (Objects.equals(source.type(), docMapper.type()) == false && + MapperService.SINGLE_MAPPING_NAME.equals(source.type()) == false) { // used by typeless APIs throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type [" + docMapper.type() + "]"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 6aab34c5f76..7663ec817a0 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -25,7 +25,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; -import org.apache.lucene.index.Term; import org.elasticsearch.Assertions; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -218,7 +217,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable { for (DocumentMapper documentMapper : updatedEntries.values()) { String mappingType = documentMapper.type(); - CompressedXContent incomingMappingSource = newIndexMetaData.mapping(mappingType).source(); + MappingMetaData mappingMetaData; + if (mappingType.equals(MapperService.DEFAULT_MAPPING)) { + mappingMetaData = newIndexMetaData.defaultMapping(); + } else { + mappingMetaData = newIndexMetaData.mapping(); + assert mappingType.equals(mappingMetaData.type()); + } + CompressedXContent incomingMappingSource = mappingMetaData.source(); String op = existingMappers.contains(mappingType) ? "updated" : "added"; if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) { @@ -254,13 +260,25 @@ public class MapperService extends AbstractIndexComponent implements Closeable { if (currentIndexMetaData.getMappingVersion() == newIndexMetaData.getMappingVersion()) { // if the mapping version is unchanged, then there should not be any updates and all mappings should be the same assert updatedEntries.isEmpty() : updatedEntries; - for (final ObjectCursor mapping : newIndexMetaData.getMappings().values()) { - final CompressedXContent currentSource = currentIndexMetaData.mapping(mapping.value.type()).source(); - final CompressedXContent newSource = mapping.value.source(); + + MappingMetaData defaultMapping = newIndexMetaData.defaultMapping(); + if (defaultMapping != null) { + final CompressedXContent currentSource = currentIndexMetaData.defaultMapping().source(); + final CompressedXContent newSource = defaultMapping.source(); assert currentSource.equals(newSource) : - "expected current mapping [" + currentSource + "] for type [" + mapping.value.type() + "] " + "expected current mapping [" + currentSource + "] for type [" + defaultMapping.type() + "] " + "to be the same as new mapping [" + newSource + "]"; } + + MappingMetaData mapping = newIndexMetaData.mapping(); + if (mapping != null) { + final CompressedXContent currentSource = currentIndexMetaData.mapping().source(); + final CompressedXContent newSource = mapping.source(); + assert currentSource.equals(newSource) : + "expected current mapping [" + currentSource + "] for type [" + mapping.type() + "] " + + "to be the same as new mapping [" + newSource + "]"; + } + } else { // if the mapping version is changed, it should increase, there should be updates, and the mapping should be different final long currentMappingVersion = currentIndexMetaData.getMappingVersion(); @@ -270,7 +288,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable { + "to be less than new mapping version [" + newMappingVersion + "]"; assert updatedEntries.isEmpty() == false; for (final DocumentMapper documentMapper : updatedEntries.values()) { - final MappingMetaData currentMapping = currentIndexMetaData.mapping(documentMapper.type()); + final MappingMetaData currentMapping; + if 
(documentMapper.type().equals(MapperService.DEFAULT_MAPPING)) { + currentMapping = currentIndexMetaData.defaultMapping(); + } else { + currentMapping = currentIndexMetaData.mapping(); + assert currentMapping == null || documentMapper.type().equals(currentMapping.type()); + } if (currentMapping != null) { final CompressedXContent currentSource = currentMapping.source(); final CompressedXContent newSource = documentMapper.mappingSource(); @@ -766,11 +790,4 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } } - /** Return a term that uniquely identifies the document, or {@code null} if the type is not allowed. */ - public Term createUidTerm(String type, String id) { - if (mapper == null || mapper.type().equals(type) == false) { - return null; - } - return new Term(IdFieldMapper.NAME, Uid.encodeId(id)); - } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index fb541b5337e..27c7c8f7169 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.Sort; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ThreadInterruptedException; import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchException; @@ -63,6 +62,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AsyncIOProcessor; +import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; @@ -77,6 +77,7 @@ import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.Engine.GetResult; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.EngineFactory; @@ -548,7 +549,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } catch (final AlreadyClosedException e) { // okay, the index was deleted } - }); + }, null); } } // set this last, once we finished updating all internal state. @@ -815,23 +816,23 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) { return new Engine.DeleteResult(e, version, operationPrimaryTerm, seqNo, false); } - final Term uid = extractUidForDelete(type, id); + if (resolveType(type).equals(mapperService.documentMapper().type()) == false) { + // We should never get there due to the fact that we generate mapping updates on deletes, + // but we still prefer to have a hard exception here as we would otherwise delete a + // document in the wrong type. 
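+            // (the uid term below is derived from the _id alone, so a mismatched type
+            // name would otherwise target whatever document currently has that _id)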
+ throw new IllegalStateException("Deleting document from type [" + resolveType(type) + "] while current type is [" + + mapperService.documentMapper().type() + "]"); + } + final Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId(id)); final Engine.Delete delete = prepareDelete(type, id, uid, seqNo, opPrimaryTerm, version, versionType, origin); return delete(getEngine(), delete); } - private static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, + private Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, VersionType versionType, Engine.Operation.Origin origin) { long startTime = System.nanoTime(); - return new Engine.Delete(type, id, uid, seqNo, primaryTerm, version, versionType, origin, startTime); - } - - private Term extractUidForDelete(String type, String id) { - // This is only correct because we create types dynamically on delete operations - // otherwise this could match the same _id from a different type - BytesRef idBytes = Uid.encodeId(id); - return new Term(IdFieldMapper.NAME, idBytes); + return new Engine.Delete(resolveType(type), id, uid, seqNo, primaryTerm, version, versionType, origin, startTime); } private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws IOException { @@ -853,6 +854,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl public Engine.GetResult get(Engine.Get get) { readAllowed(); + DocumentMapper mapper = mapperService.documentMapper(); + if (mapper == null || mapper.type().equals(resolveType(get.type())) == false) { + return GetResult.NOT_EXISTS; + } return getEngine().get(get, this::acquireSearcher); } @@ -2273,8 +2278,23 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } } + /** + * If an index/update/get/delete operation is using the special `_doc` type, then we replace + * it with the actual type that is being used in the mappings so that users may use typeless + * APIs with indices that have types. 
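+     * For example, a typeless get of document `1` against an index whose single
+     * mapped type is `my_type` is executed as a get of `my_type/1`.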
+ */ + private String resolveType(String type) { + if (MapperService.SINGLE_MAPPING_NAME.equals(type)) { + DocumentMapper docMapper = mapperService.documentMapper(); + if (docMapper != null) { + return docMapper.type(); + } + } + return type; + } + private DocumentMapperForType docMapper(String type) { - return mapperService.documentMapperWithAutoCreate(type); + return mapperService.documentMapperWithAutoCreate(resolveType(type)); } private EngineConfig newEngineConfig() { @@ -2316,14 +2336,26 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl indexShardOperationPermits.asyncBlockOperations(onPermitAcquired, timeout.duration(), timeout.timeUnit()); } - private void bumpPrimaryTerm(final long newPrimaryTerm, final CheckedRunnable onBlocked) { + private void bumpPrimaryTerm(final long newPrimaryTerm, + final CheckedRunnable onBlocked, + @Nullable ActionListener combineWithAction) { assert Thread.holdsLock(mutex); - assert newPrimaryTerm > pendingPrimaryTerm; + assert newPrimaryTerm > pendingPrimaryTerm || (newPrimaryTerm >= pendingPrimaryTerm && combineWithAction != null); assert operationPrimaryTerm <= pendingPrimaryTerm; final CountDownLatch termUpdated = new CountDownLatch(1); indexShardOperationPermits.asyncBlockOperations(new ActionListener() { @Override public void onFailure(final Exception e) { + try { + innerFail(e); + } finally { + if (combineWithAction != null) { + combineWithAction.onFailure(e); + } + } + } + + private void innerFail(final Exception e) { try { failShard("exception during primary term transition", e); } catch (AlreadyClosedException ace) { @@ -2333,7 +2365,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl @Override public void onResponse(final Releasable releasable) { - try (Releasable ignored = releasable) { + final RunOnce releaseOnce = new RunOnce(releasable::close); + try { assert operationPrimaryTerm <= pendingPrimaryTerm; termUpdated.await(); // indexShardOperationPermits doesn't guarantee that async submissions are executed @@ -2343,7 +2376,17 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl onBlocked.run(); } } catch (final Exception e) { - onFailure(e); + if (combineWithAction == null) { + // otherwise leave it to combineWithAction to release the permit + releaseOnce.run(); + } + innerFail(e); + } finally { + if (combineWithAction != null) { + combineWithAction.onResponse(releasable); + } else { + releaseOnce.run(); + } } } }, 30, TimeUnit.MINUTES); @@ -2371,7 +2414,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl public void acquireReplicaOperationPermit(final long opPrimaryTerm, final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes, final ActionListener onPermitAcquired, final String executorOnDelay, final Object debugInfo) { - innerAcquireReplicaOperationPermit(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, onPermitAcquired, + innerAcquireReplicaOperationPermit(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, onPermitAcquired, false, (listener) -> indexShardOperationPermits.acquire(listener, executorOnDelay, true, debugInfo)); } @@ -2393,7 +2436,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl final long maxSeqNoOfUpdatesOrDeletes, final ActionListener onPermitAcquired, final TimeValue timeout) { - innerAcquireReplicaOperationPermit(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, onPermitAcquired, + 
innerAcquireReplicaOperationPermit(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, onPermitAcquired, true, (listener) -> indexShardOperationPermits.asyncBlockOperations(listener, timeout.duration(), timeout.timeUnit())); } @@ -2401,41 +2444,16 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes, final ActionListener onPermitAcquired, - final Consumer> consumer) { + final boolean allowCombineOperationWithPrimaryTermUpdate, + final Consumer> operationExecutor) { verifyNotClosed(); - if (opPrimaryTerm > pendingPrimaryTerm) { - synchronized (mutex) { - if (opPrimaryTerm > pendingPrimaryTerm) { - final IndexShardState shardState = state(); - // only roll translog and update primary term if shard has made it past recovery - // Having a new primary term here means that the old primary failed and that there is a new primary, which again - // means that the master will fail this shard as all initializing shards are failed when a primary is selected - // We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint - if (shardState != IndexShardState.POST_RECOVERY && - shardState != IndexShardState.STARTED) { - throw new IndexShardNotStartedException(shardId, shardState); - } - if (opPrimaryTerm > pendingPrimaryTerm) { - bumpPrimaryTerm(opPrimaryTerm, () -> { - updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition"); - final long currentGlobalCheckpoint = getGlobalCheckpoint(); - final long maxSeqNo = seqNoStats().getMaxSeqNo(); - logger.info("detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]", - opPrimaryTerm, currentGlobalCheckpoint, maxSeqNo); - if (currentGlobalCheckpoint < maxSeqNo) { - resetEngineToGlobalCheckpoint(); - } else { - getEngine().rollTranslogGeneration(); - } - }); - } - } - } - } - assert opPrimaryTerm <= pendingPrimaryTerm - : "operation primary term [" + opPrimaryTerm + "] should be at most [" + pendingPrimaryTerm + "]"; - consumer.accept(new ActionListener() { + // This listener is used for the execution of the operation. If the operation requires all the permits for its + // execution and the primary term must be updated first, we can combine the operation execution with the + // primary term update. Since indexShardOperationPermits doesn't guarantee that async submissions are executed + // in the order submitted, combining both operations ensure that the term is updated before the operation is + // executed. It also has the side effect of acquiring all the permits one time instead of two. 
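+        // Note that combining is only requested by the all-permits variant of this method
+        // (allowCombineOperationWithPrimaryTermUpdate is true there), since the term bump
+        // itself already blocks all operations.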
+ final ActionListener operationListener = new ActionListener() { @Override public void onResponse(final Releasable releasable) { if (opPrimaryTerm < operationPrimaryTerm) { @@ -2465,7 +2483,48 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl public void onFailure(final Exception e) { onPermitAcquired.onFailure(e); } - }); + }; + + if (requirePrimaryTermUpdate(opPrimaryTerm, allowCombineOperationWithPrimaryTermUpdate)) { + synchronized (mutex) { + if (requirePrimaryTermUpdate(opPrimaryTerm, allowCombineOperationWithPrimaryTermUpdate)) { + final IndexShardState shardState = state(); + // only roll translog and update primary term if shard has made it past recovery + // Having a new primary term here means that the old primary failed and that there is a new primary, which again + // means that the master will fail this shard as all initializing shards are failed when a primary is selected + // We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint + if (shardState != IndexShardState.POST_RECOVERY && + shardState != IndexShardState.STARTED) { + throw new IndexShardNotStartedException(shardId, shardState); + } + + bumpPrimaryTerm(opPrimaryTerm, () -> { + updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition"); + final long currentGlobalCheckpoint = getGlobalCheckpoint(); + final long maxSeqNo = seqNoStats().getMaxSeqNo(); + logger.info("detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]", + opPrimaryTerm, currentGlobalCheckpoint, maxSeqNo); + if (currentGlobalCheckpoint < maxSeqNo) { + resetEngineToGlobalCheckpoint(); + } else { + getEngine().rollTranslogGeneration(); + } + }, allowCombineOperationWithPrimaryTermUpdate ? 
operationListener : null); + + if (allowCombineOperationWithPrimaryTermUpdate) { + logger.debug("operation execution has been combined with primary term update"); + return; + } + } + } + } + assert opPrimaryTerm <= pendingPrimaryTerm + : "operation primary term [" + opPrimaryTerm + "] should be at most [" + pendingPrimaryTerm + "]"; + operationExecutor.accept(operationListener); + } + + private boolean requirePrimaryTermUpdate(final long opPrimaryTerm, final boolean allPermits) { + return (opPrimaryTerm > pendingPrimaryTerm) || (allPermits && opPrimaryTerm > operationPrimaryTerm); } public int getActiveOperationsCount() { diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 1ed41ea6036..feb48ef85d1 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -149,11 +149,8 @@ final class StoreRecovery { final long maxSeqNo, final long maxUnsafeAutoIdTimestamp, IndexMetaData indexMetaData, int shardId, boolean split, boolean hasNested) throws IOException { - // clean target directory (if previous recovery attempt failed) and create a fresh segment file with the proper lucene version - Lucene.cleanLuceneIndex(target); assert sources.length > 0; final int luceneIndexCreatedVersionMajor = Lucene.readSegmentInfos(sources[0]).getIndexCreatedVersionMajor(); - new SegmentInfos(luceneIndexCreatedVersionMajor).commit(target); final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); @@ -164,7 +161,8 @@ final class StoreRecovery { // later once we stared it up otherwise we would need to wait for it here // we also don't specify a codec here and merges should use the engines for this index .setMergePolicy(NoMergePolicy.INSTANCE) - .setOpenMode(IndexWriterConfig.OpenMode.APPEND); + .setOpenMode(IndexWriterConfig.OpenMode.CREATE) + .setIndexCreatedVersionMajor(luceneIndexCreatedVersionMajor); if (indexSort != null) { iwc.setIndexSort(indexSort); } @@ -417,7 +415,7 @@ final class StoreRecovery { logger.debug("failed to list file details", e); } } else { - store.createEmpty(); + store.createEmpty(indexShard.indexSettings().getIndexVersionCreated().luceneVersion); final String translogUUID = Translog.createEmptyTranslog( indexShard.shardPath().resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId, indexShard.getPendingPrimaryTerm()); diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 3bec89549a7..66e3e4d5558 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -1404,9 +1404,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref /** * creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted. 
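+     * @param luceneVersion the Lucene version to stamp on the new empty index;
+     *                      callers pass the Lucene version that corresponds to the
+     *                      version the index was created on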
*/
-    public void createEmpty() throws IOException {
+    public void createEmpty(Version luceneVersion) throws IOException {
         metadataLock.writeLock().lock();
-        try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.CREATE, directory, null)) {
+        try (IndexWriter writer = newEmptyIndexWriter(directory, luceneVersion)) {
             final Map<String, String> map = new HashMap<>();
             map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
             map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED));
@@ -1443,7 +1443,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
      */
     public void bootstrapNewHistory(long maxSeqNo) throws IOException {
         metadataLock.writeLock().lock();
-        try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) {
+        try (IndexWriter writer = newAppendingIndexWriter(directory, null)) {
             final Map<String, String> map = new HashMap<>();
             map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
             map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo));
@@ -1461,7 +1461,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
      */
     public void associateIndexWithNewTranslog(final String translogUUID) throws IOException {
         metadataLock.writeLock().lock();
-        try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) {
+        try (IndexWriter writer = newAppendingIndexWriter(directory, null)) {
             if (translogUUID.equals(getUserData(writer).get(Translog.TRANSLOG_UUID_KEY))) {
                 throw new IllegalArgumentException("a new translog uuid can't be equal to existing one. got [" + translogUUID + "]");
             }
@@ -1480,7 +1480,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
      */
     public void ensureIndexHasHistoryUUID() throws IOException {
         metadataLock.writeLock().lock();
-        try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) {
+        try (IndexWriter writer = newAppendingIndexWriter(directory, null)) {
             final Map<String, String> userData = getUserData(writer);
             if (userData.containsKey(Engine.HISTORY_UUID_KEY) == false) {
                 updateCommitData(writer, Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()));
@@ -1546,7 +1546,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
                 + translogUUID + "]");
         }
         if (startingIndexCommit.equals(existingCommits.get(existingCommits.size() - 1)) == false) {
-            try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, startingIndexCommit)) {
+            try (IndexWriter writer = newAppendingIndexWriter(directory, startingIndexCommit)) {
                 // this achieves two things:
                 // - by committing a new commit based on the starting commit, it makes sure the starting commit will be opened
                 // - deletes any other commit (by lucene standard deletion policy)
@@ -1578,19 +1578,28 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
         return userData;
     }

-    private static IndexWriter newIndexWriter(final IndexWriterConfig.OpenMode openMode, final Directory dir, final IndexCommit commit)
-        throws IOException {
-        assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit";
-        IndexWriterConfig iwc = new IndexWriterConfig(null)
-            .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
-            .setCommitOnClose(false)
+    private static IndexWriter newAppendingIndexWriter(final Directory dir, final IndexCommit commit) throws IOException {
+        IndexWriterConfig iwc = newIndexWriterConfig()
.setIndexCommit(commit) - // we don't want merges to happen here - we call maybe merge on the engine - // later once we stared it up otherwise we would need to wait for it here - // we also don't specify a codec here and merges should use the engines for this index - .setMergePolicy(NoMergePolicy.INSTANCE) - .setOpenMode(openMode); + .setOpenMode(IndexWriterConfig.OpenMode.APPEND); return new IndexWriter(dir, iwc); } + private static IndexWriter newEmptyIndexWriter(final Directory dir, final Version luceneVersion) throws IOException { + IndexWriterConfig iwc = newIndexWriterConfig() + .setOpenMode(IndexWriterConfig.OpenMode.CREATE) + .setIndexCreatedVersionMajor(luceneVersion.major); + return new IndexWriter(dir, iwc); + } + + private static IndexWriterConfig newIndexWriterConfig() { + return new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) + .setCommitOnClose(false) + // we don't want merges to happen here - we call maybe merge on the engine + // later once we stared it up otherwise we would need to wait for it here + // we also don't specify a codec here and merges should use the engines for this index + .setMergePolicy(NoMergePolicy.INSTANCE); + } + } diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index bbc7c755a67..68f175f7ed6 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -43,6 +43,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.DocumentMapperForType; +import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -50,6 +51,7 @@ import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.StringFieldType; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.search.dfs.AggregatedDfs; @@ -82,11 +84,7 @@ public class TermVectorsService { final long startTime = nanoTimeSupplier.getAsLong(); final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(), request.type(), request.id()); - final Term uidTerm = indexShard.mapperService().createUidTerm(request.type(), request.id()); - if (uidTerm == null) { - termVectorsResponse.setExists(false); - return termVectorsResponse; - } + final Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(request.id())); Fields termVectorsByField = null; AggregatedDfs dfs = null; diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index e156fcebbec..fe80592d009 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -41,7 +41,6 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.TransportService; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -100,6 +99,14 @@ public class RepositoriesService implements ClusterStateApplier { registrationListener = listener; } + // Trying to create the new repository on master to make sure it works + try { + closeRepository(createRepository(newRepositoryMetaData)); + } catch (Exception e) { + registrationListener.onFailure(e); + return; + } + clusterService.submitStateUpdateTask(request.cause, new AckedClusterStateUpdateTask(request, registrationListener) { @Override protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { @@ -107,13 +114,8 @@ public class RepositoriesService implements ClusterStateApplier { } @Override - public ClusterState execute(ClusterState currentState) throws IOException { + public ClusterState execute(ClusterState currentState) { ensureRepositoryNotInUse(currentState, request.name); - // Trying to create the new repository on master to make sure it works - if (!registerRepository(newRepositoryMetaData)) { - // The new repository has the same settings as the old one - ignore - return currentState; - } MetaData metaData = currentState.metaData(); MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE); @@ -127,6 +129,10 @@ public class RepositoriesService implements ClusterStateApplier { for (RepositoryMetaData repositoryMetaData : repositories.repositories()) { if (repositoryMetaData.name().equals(newRepositoryMetaData.name())) { + if (newRepositoryMetaData.equals(repositoryMetaData)) { + // Previous version is the same as this one no update is needed. + return currentState; + } found = true; repositoriesMetaData.add(newRepositoryMetaData); } else { @@ -352,37 +358,8 @@ public class RepositoriesService implements ClusterStateApplier { throw new RepositoryMissingException(repositoryName); } - /** - * Creates a new repository and adds it to the list of registered repositories. - *
<p>
- * If a repository with the same name but different types or settings already exists, it will be closed and - * replaced with the new repository. If a repository with the same name exists but it has the same type and settings - * the new repository is ignored. - * - * @param repositoryMetaData new repository metadata - * @return {@code true} if new repository was added or {@code false} if it was ignored - */ - private boolean registerRepository(RepositoryMetaData repositoryMetaData) throws IOException { - Repository previous = repositories.get(repositoryMetaData.name()); - if (previous != null) { - RepositoryMetaData previousMetadata = previous.getMetadata(); - if (previousMetadata.equals(repositoryMetaData)) { - // Previous version is the same as this one - ignore it - return false; - } - } - Repository newRepo = createRepository(repositoryMetaData); - if (previous != null) { - closeRepository(previous); - } - Map newRepositories = new HashMap<>(repositories); - newRepositories.put(repositoryMetaData.name(), newRepo); - repositories = newRepositories; - return true; - } - /** Closes the given repository. */ - private void closeRepository(Repository repository) throws IOException { + private void closeRepository(Repository repository) { logger.debug("closing repository [{}][{}]", repository.getMetadata().type(), repository.getMetadata().name()); repository.close(); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index b458b831d73..09eb557fe9c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -27,8 +27,6 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexNotFoundException; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -1495,11 +1493,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp // version number and no checksum, even though the index itself is perfectly fine to restore, this // empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty // shard anyway, we just create the empty shard here and then exit. 
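In the BlobStoreRepository hunk below, the hand-rolled IndexWriter is replaced by Store#createEmpty so the empty shard records the Lucene major version the index was created with. Condensed from the Store.java hunk earlier in this diff, the writer configuration behind that call looks roughly like this (luceneVersion and dir stand in for the method parameters):

    // Sketch, condensed from Store.newEmptyIndexWriter/newIndexWriterConfig:
    // the created-version major is pinned so version checks keep working for
    // indices created on older Elasticsearch releases.
    IndexWriterConfig iwc = new IndexWriterConfig(null)
        .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
        .setCommitOnClose(false)
        .setMergePolicy(NoMergePolicy.INSTANCE)     // merges belong to the engine
        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
        .setIndexCreatedVersionMajor(luceneVersion.major);
    IndexWriter writer = new IndexWriter(dir, iwc);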
- IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null) - .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) - .setOpenMode(IndexWriterConfig.OpenMode.CREATE) - .setCommitOnClose(true)); - writer.close(); + store.createEmpty(targetShard.indexSettings().getIndexMetaData().getCreationVersion().luceneVersion); return; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java index a312f6ab284..89b8b9267f6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestTermVectorsAction.java @@ -19,11 +19,9 @@ package org.elasticsearch.rest.action.document; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.VersionType; @@ -45,19 +43,13 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; * TermVectorsRequest. */ public class RestTermVectorsAction extends BaseRestHandler { - private static final DeprecationLogger deprecationLogger = new DeprecationLogger( - LogManager.getLogger(RestTermVectorsAction.class)); public RestTermVectorsAction(Settings settings, RestController controller) { super(settings); - controller.registerWithDeprecatedHandler(GET, "/{index}/{type}/_termvectors", this, - GET, "/{index}/{type}/_termvector", deprecationLogger); - controller.registerWithDeprecatedHandler(POST, "/{index}/{type}/_termvectors", this, - POST, "/{index}/{type}/_termvector", deprecationLogger); - controller.registerWithDeprecatedHandler(GET, "/{index}/{type}/{id}/_termvectors", this, - GET, "/{index}/{type}/{id}/_termvector", deprecationLogger); - controller.registerWithDeprecatedHandler(POST, "/{index}/{type}/{id}/_termvectors", this, - POST, "/{index}/{type}/{id}/_termvector", deprecationLogger); + controller.registerHandler(GET, "/{index}/{type}/_termvectors", this); + controller.registerHandler(POST, "/{index}/{type}/_termvectors", this); + controller.registerHandler(GET, "/{index}/{type}/{id}/_termvectors", this); + controller.registerHandler(POST, "/{index}/{type}/{id}/_termvectors", this); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 663214f49d8..98f2e1d2e7e 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -112,6 +112,7 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -145,6 +146,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv public static final Setting DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS = Setting.boolSetting("search.default_allow_partial_results", true, Property.Dynamic, Property.NodeScope); + public static final Setting MAX_OPEN_SCROLL_CONTEXT = + 
Setting.intSetting("search.max_open_scroll_context", 500, 0, Property.Dynamic, Property.NodeScope); + private final ThreadPool threadPool; @@ -174,6 +178,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private volatile boolean lowLevelCancellation; + private volatile int maxOpenScrollContext; + private final Cancellable keepAliveReaper; private final AtomicLong idGenerator = new AtomicLong(); @@ -182,6 +188,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private final MultiBucketConsumerService multiBucketConsumerService; + private final AtomicInteger openScrollContexts = new AtomicInteger(); + public SearchService(ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase, ResponseCollectorService responseCollectorService) { @@ -212,6 +220,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv clusterService.getClusterSettings().addSettingsUpdateConsumer(DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, this::setDefaultAllowPartialSearchResults); + maxOpenScrollContext = MAX_OPEN_SCROLL_CONTEXT.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_SCROLL_CONTEXT, this::setMaxOpenScrollContext); lowLevelCancellation = LOW_LEVEL_CANCELLATION_SETTING.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(LOW_LEVEL_CANCELLATION_SETTING, this::setLowLevelCancellation); @@ -243,6 +253,10 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv return defaultAllowPartialSearchResults; } + private void setMaxOpenScrollContext(int maxOpenScrollContext) { + this.maxOpenScrollContext = maxOpenScrollContext; + } + private void setLowLevelCancellation(Boolean lowLevelCancellation) { this.lowLevelCancellation = lowLevelCancellation; } @@ -592,11 +606,19 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } final SearchContext createAndPutContext(ShardSearchRequest request) throws IOException { + if (request.scroll() != null && openScrollContexts.get() >= maxOpenScrollContext) { + throw new ElasticsearchException( + "Trying to create too many scroll contexts. Must be less than or equal to: [" + + maxOpenScrollContext + "]. 
" + "This limit can be set by changing the [" + + MAX_OPEN_SCROLL_CONTEXT.getKey() + "] setting."); + } + SearchContext context = createContext(request); boolean success = false; try { putContext(context); if (request.scroll() != null) { + openScrollContexts.incrementAndGet(); context.indexShard().getSearchOperationListener().onNewScrollContext(context); } context.indexShard().getSearchOperationListener().onNewContext(context); @@ -696,6 +718,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv assert context.refCount() > 0 : " refCount must be > 0: " + context.refCount(); context.indexShard().getSearchOperationListener().onFreeContext(context); if (context.scrollContext() != null) { + openScrollContexts.decrementAndGet(); context.indexShard().getSearchOperationListener().onFreeScrollContext(context); } return true; diff --git a/server/src/test/java/org/elasticsearch/action/get/TransportMultiGetActionTests.java b/server/src/test/java/org/elasticsearch/action/get/TransportMultiGetActionTests.java index f550c038e60..b028724a80e 100644 --- a/server/src/test/java/org/elasticsearch/action/get/TransportMultiGetActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/TransportMultiGetActionTests.java @@ -91,6 +91,7 @@ public class TransportMultiGetActionTests extends ESTestCase { }; final Index index1 = new Index("index1", randomBase64UUID()); + final Index index2 = new Index("index2", randomBase64UUID()); final ClusterState clusterState = ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName())) .metaData(new MetaData.Builder() .put(new IndexMetaData.Builder(index1.getName()) @@ -98,33 +99,45 @@ public class TransportMultiGetActionTests extends ESTestCase { .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1) .put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID())) - .putMapping("type1", + .putMapping("_doc", XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() - .startObject("type1") + .startObject("_doc") .startObject("_routing") .field("required", false) .endObject() .endObject() - .endObject()), true, XContentType.JSON)) - .putMapping("type2", + .endObject()), true, XContentType.JSON))) + .put(new IndexMetaData.Builder(index2.getName()) + .settings(Settings.builder().put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + .put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID())) + .putMapping("_doc", XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder() .startObject() - .startObject("type2") + .startObject("_doc") .startObject("_routing") .field("required", true) .endObject() .endObject() .endObject()), true, XContentType.JSON)))).build(); - final ShardIterator shardIterator = mock(ShardIterator.class); - when(shardIterator.shardId()).thenReturn(new ShardId(index1, randomInt())); + final ShardIterator index1ShardIterator = mock(ShardIterator.class); + when(index1ShardIterator.shardId()).thenReturn(new ShardId(index1, randomInt())); + + final ShardIterator index2ShardIterator = mock(ShardIterator.class); + when(index2ShardIterator.shardId()).thenReturn(new ShardId(index2, randomInt())); final OperationRouting operationRouting = mock(OperationRouting.class); when(operationRouting.getShards(eq(clusterState), eq(index1.getName()), anyString(), anyString(), anyString())) - .thenReturn(shardIterator); + .thenReturn(index1ShardIterator); 
when(operationRouting.shardId(eq(clusterState), eq(index1.getName()), anyString(), anyString())) .thenReturn(new ShardId(index1, randomInt())); + when(operationRouting.getShards(eq(clusterState), eq(index2.getName()), anyString(), anyString(), anyString())) + .thenReturn(index2ShardIterator); + when(operationRouting.shardId(eq(clusterState), eq(index2.getName()), anyString(), anyString())) + .thenReturn(new ShardId(index2, randomInt())); clusterService = mock(ClusterService.class); when(clusterService.localNode()).thenReturn(transportService.getLocalNode()); @@ -153,8 +166,8 @@ public class TransportMultiGetActionTests extends ESTestCase { final Task task = createTask(); final NodeClient client = new NodeClient(Settings.EMPTY, threadPool); final MultiGetRequestBuilder request = new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE); - request.add(new MultiGetRequest.Item("index1", "type1", "1")); - request.add(new MultiGetRequest.Item("index1", "type1", "2")); + request.add(new MultiGetRequest.Item("index1", "_doc", "1")); + request.add(new MultiGetRequest.Item("index1", "_doc", "2")); final AtomicBoolean shardActionInvoked = new AtomicBoolean(false); transportAction = new TransportMultiGetAction(transportService, clusterService, shardAction, @@ -178,8 +191,8 @@ public class TransportMultiGetActionTests extends ESTestCase { final Task task = createTask(); final NodeClient client = new NodeClient(Settings.EMPTY, threadPool); final MultiGetRequestBuilder request = new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE); - request.add(new MultiGetRequest.Item("index1", "type2", "1").routing("1")); - request.add(new MultiGetRequest.Item("index1", "type2", "2")); + request.add(new MultiGetRequest.Item("index2", "_doc", "1").routing("1")); + request.add(new MultiGetRequest.Item("index2", "_doc", "2")); final AtomicBoolean shardActionInvoked = new AtomicBoolean(false); transportAction = new TransportMultiGetAction(transportService, clusterService, shardAction, @@ -193,7 +206,7 @@ public class TransportMultiGetActionTests extends ESTestCase { assertNull(responses.get(0)); assertThat(responses.get(1).getFailure().getFailure(), instanceOf(RoutingMissingException.class)); assertThat(responses.get(1).getFailure().getFailure().getMessage(), - equalTo("routing is required for [index1]/[type2]/[2]")); + equalTo("routing is required for [index2]/[_doc]/[2]")); } }; diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index a45012dc4b3..442e27c0867 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -506,7 +506,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { for (int id = 0; id < content.length; id++) { Fields[] fields = new Fields[2]; for (int j = 0; j < indexNames.length; j++) { - TermVectorsResponse resp = client().prepareTermVector(indexNames[j], "type1", String.valueOf(id)) + TermVectorsResponse resp = client().prepareTermVectors(indexNames[j], "type1", String.valueOf(id)) .setOffsets(true) .setPositions(true) .setSelectedFields("field1") @@ -1069,7 +1069,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { for (int id = 0; id < content.length; id++) { Fields[] fields = new Fields[2]; for (int j = 0; j < indexNames.length; j++) { - TermVectorsResponse resp = client().prepareTermVector(indexNames[j], 
"type1", String.valueOf(id)) + TermVectorsResponse resp = client().prepareTermVectors(indexNames[j], "type1", String.valueOf(id)) .setOffsets(true) .setPositions(true) .setSelectedFields("field1", "field2") diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsActionTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsActionTests.java index d2bae148ef5..db50f752728 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/TransportMultiTermVectorsActionTests.java @@ -92,40 +92,53 @@ public class TransportMultiTermVectorsActionTests extends ESTestCase { }; final Index index1 = new Index("index1", randomBase64UUID()); + final Index index2 = new Index("index2", randomBase64UUID()); final ClusterState clusterState = ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName())) .metaData(new MetaData.Builder() .put(new IndexMetaData.Builder(index1.getName()) - .settings(Settings.builder().put("index.version.created", Version.CURRENT) - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 1) - .put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID())) - .putMapping("type1", - XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder() - .startObject() - .startObject("type1") - .startObject("_routing") - .field("required", false) + .settings(Settings.builder().put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + .put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID())) + .putMapping("_doc", + XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("_routing") + .field("required", false) + .endObject() .endObject() - .endObject() - .endObject()), true, XContentType.JSON)) - .putMapping("type2", - XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder() - .startObject() - .startObject("type2") - .startObject("_routing") - .field("required", true) + .endObject()), true, XContentType.JSON))) + .put(new IndexMetaData.Builder(index2.getName()) + .settings(Settings.builder().put("index.version.created", Version.CURRENT) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + .put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID())) + .putMapping("_doc", + XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("_routing") + .field("required", true) + .endObject() .endObject() - .endObject() - .endObject()), true, XContentType.JSON)))).build(); + .endObject()), true, XContentType.JSON)))).build(); - final ShardIterator shardIterator = mock(ShardIterator.class); - when(shardIterator.shardId()).thenReturn(new ShardId(index1, randomInt())); + final ShardIterator index1ShardIterator = mock(ShardIterator.class); + when(index1ShardIterator.shardId()).thenReturn(new ShardId(index1, randomInt())); + + final ShardIterator index2ShardIterator = mock(ShardIterator.class); + when(index2ShardIterator.shardId()).thenReturn(new ShardId(index2, randomInt())); final OperationRouting operationRouting = mock(OperationRouting.class); when(operationRouting.getShards(eq(clusterState), eq(index1.getName()), anyString(), anyString(), anyString())) - .thenReturn(shardIterator); + 
.thenReturn(index1ShardIterator); when(operationRouting.shardId(eq(clusterState), eq(index1.getName()), anyString(), anyString())) .thenReturn(new ShardId(index1, randomInt())); + when(operationRouting.getShards(eq(clusterState), eq(index2.getName()), anyString(), anyString(), anyString())) + .thenReturn(index2ShardIterator); + when(operationRouting.shardId(eq(clusterState), eq(index2.getName()), anyString(), anyString())) + .thenReturn(new ShardId(index2, randomInt())); clusterService = mock(ClusterService.class); when(clusterService.localNode()).thenReturn(transportService.getLocalNode()); @@ -155,8 +168,8 @@ public class TransportMultiTermVectorsActionTests extends ESTestCase { final Task task = createTask(); final NodeClient client = new NodeClient(Settings.EMPTY, threadPool); final MultiTermVectorsRequestBuilder request = new MultiTermVectorsRequestBuilder(client, MultiTermVectorsAction.INSTANCE); - request.add(new TermVectorsRequest("index1", "type1", "1")); - request.add(new TermVectorsRequest("index1", "type1", "2")); + request.add(new TermVectorsRequest("index1", "_doc", "1")); + request.add(new TermVectorsRequest("index2", "_doc", "2")); final AtomicBoolean shardActionInvoked = new AtomicBoolean(false); transportAction = new TransportMultiTermVectorsAction(transportService, clusterService, shardAction, @@ -180,8 +193,8 @@ public class TransportMultiTermVectorsActionTests extends ESTestCase { final Task task = createTask(); final NodeClient client = new NodeClient(Settings.EMPTY, threadPool); final MultiTermVectorsRequestBuilder request = new MultiTermVectorsRequestBuilder(client, MultiTermVectorsAction.INSTANCE); - request.add(new TermVectorsRequest("index1", "type2", "1").routing("1")); - request.add(new TermVectorsRequest("index1", "type2", "2")); + request.add(new TermVectorsRequest("index2", "_doc", "1").routing("1")); + request.add(new TermVectorsRequest("index2", "_doc", "2")); final AtomicBoolean shardActionInvoked = new AtomicBoolean(false); transportAction = new TransportMultiTermVectorsAction(transportService, clusterService, shardAction, diff --git a/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index c4a5e6c39d9..edad8494f54 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -278,7 +278,7 @@ public class AckIT extends ESIntegTestCase { assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=keyword")); for (Client client : clients()) { - assertThat(getLocalClusterState(client).metaData().indices().get("test").mapping("test"), notNullValue()); + assertThat(getLocalClusterState(client).metaData().indices().get("test").getMappings().get("test"), notNullValue()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java index 865059c3379..d7e9767d7a1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataMappingServiceTests.java @@ -61,7 +61,7 @@ public class MetaDataMappingServiceTests extends ESSingleNodeTestCase { // the task really was a mapping update assertThat( indexService.mapperService().documentMapper("type").mappingSource(), - 
not(equalTo(result.resultingState.metaData().index("test").mapping("type").source()))); + not(equalTo(result.resultingState.metaData().index("test").getMappings().get("type").source()))); // since we never committed the cluster state update, the in-memory state is unchanged assertThat(indexService.mapperService().documentMapper("type").mappingSource(), equalTo(currentMapping)); } diff --git a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java index 115cfcdf26f..698df3d2b7b 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java @@ -26,16 +26,19 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; +import org.elasticsearch.Version; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.hamcrest.MatcherAssert; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.loadDocIdAndVersion; @@ -193,4 +196,25 @@ public class VersionsTests extends ESTestCase { assertEquals(size, VersionsAndSeqNoResolver.lookupStates.size()); dir.close(); } + + public void testLuceneVersionOnUnknownVersions() { + List allVersions = VersionUtils.allVersions(); + + // should have the same Lucene version as the latest 6.x version + Version version = Version.fromString("6.88.50"); + assertEquals(allVersions.get(Collections.binarySearch(allVersions, Version.V_7_0_0) - 1).luceneVersion, + version.luceneVersion); + + // between two known versions, should use the lucene version of the previous version + version = Version.fromString("6.2.50"); + assertEquals(VersionUtils.getPreviousVersion(Version.V_6_2_4).luceneVersion, version.luceneVersion); + + // too old version, major should be the oldest supported lucene version minus 1 + version = Version.fromString("5.2.1"); + assertEquals(Version.V_6_0_0.luceneVersion.major - 1, version.luceneVersion.major); + + // future version, should be the same version as today + version = Version.fromString("7.77.1"); + assertEquals(Version.CURRENT.luceneVersion, version.luceneVersion); + } } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 112dca8f8c2..6b378d98f96 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -86,7 +86,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase { logger.info("--> verify meta _routing required exists"); MappingMetaData mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData() - .index("test").mapping("type1"); + .index("test").getMappings().get("type1"); assertThat(mappingMd.routing().required(), equalTo(true)); logger.info("--> restarting nodes..."); @@ -96,7 +96,8 @@ public class GatewayIndexStateIT extends 
ESIntegTestCase { ensureYellow(); logger.info("--> verify meta _routing required exists"); - mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").mapping("type1"); + mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").getMappings() + .get("type1"); assertThat(mappingMd.routing().required(), equalTo(true)); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index c77a9682fa7..a34bc77ac1f 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -67,7 +67,6 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; @@ -127,6 +126,7 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; @@ -2605,7 +2605,7 @@ public class InternalEngineTests extends EngineTestCase { // create { - store.createEmpty(); + store.createEmpty(Version.CURRENT.luceneVersion); final String translogUUID = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); @@ -2769,7 +2769,7 @@ public class InternalEngineTests extends EngineTestCase { final Path translogPath = createTempDir(); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final LongSupplier globalCheckpointSupplier = () -> globalCheckpoint.get(); - store.createEmpty(); + store.createEmpty(Version.CURRENT.luceneVersion); final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); try (InternalEngine engine = @@ -4585,7 +4585,7 @@ public class InternalEngineTests extends EngineTestCase { final Path translogPath = createTempDir(); store = createStore(); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - store.createEmpty(); + store.createEmpty(Version.CURRENT.luceneVersion); final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); @@ -5454,6 +5454,34 @@ public class InternalEngineTests extends EngineTestCase { } } + public void testOpenSoftDeletesIndexWithSoftDeletesDisabled() throws Exception { + try (Store store = createStore()) { + Path translogPath = createTempDir(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final IndexSettings softDeletesEnabled = IndexSettingsModule.newIndexSettings( + IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(Settings.builder(). 
+ put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)).build()); + final List docs; + try (InternalEngine engine = createEngine( + config(softDeletesEnabled, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get))) { + List ops = generateReplicaHistory(between(1, 100), randomBoolean()); + applyOperations(engine, ops); + globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint())); + engine.syncTranslog(); + engine.flush(); + docs = getDocIds(engine, true); + } + final IndexSettings softDeletesDisabled = IndexSettingsModule.newIndexSettings( + IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(Settings.builder() + .put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false)).build()); + EngineConfig config = config(softDeletesDisabled, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get); + trimUnsafeCommits(config); + try (InternalEngine engine = createEngine(config)) { + assertThat(getDocIds(engine, true), equalTo(docs)); + } + } + } + static void trimUnsafeCommits(EngineConfig config) throws IOException { final Store store = config.getStore(); final TranslogConfig translogConfig = config.getTranslogConfig(); @@ -5472,4 +5500,25 @@ public class InternalEngineTests extends EngineTestCase { assertThat(message, engine.getNumDocUpdates(), equalTo(expectedUpdates)); assertThat(message, engine.getNumDocDeletes(), equalTo(expectedDeletes)); } + + public void testStoreHonorsLuceneVersion() throws IOException { + for (Version createdVersion : Arrays.asList( + Version.CURRENT, VersionUtils.getPreviousMinorVersion(), VersionUtils.getFirstVersion())) { + Settings settings = Settings.builder() + .put(indexSettings()) + .put(IndexMetaData.SETTING_VERSION_CREATED, createdVersion).build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + try (Store store = createStore(); + InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null))) { + ParsedDocument doc = testParsedDocument("1", null, new Document(), + new BytesArray("{}".getBytes("UTF-8")), null); + engine.index(appendOnlyPrimary(doc, false, 1)); + engine.refresh("test"); + try (Engine.Searcher searcher = engine.acquireSearcher("test")) { + LeafReader leafReader = getOnlyLeafReader(searcher.reader()); + assertEquals(createdVersion.luceneVersion.major, leafReader.getMetaData().getCreatedVersionMajor()); + } + } + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java index 90469d71944..579a263b7ba 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.engine; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.mapper.ParsedDocument; @@ -143,7 +144,7 @@ public class ReadOnlyEngineTests extends EngineTestCase { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try (Store store = createStore()) { EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); 
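testOpenSoftDeletesIndexWithSoftDeletesDisabled above writes history with soft deletes enabled, then reopens the same store with the setting flipped off and expects the same doc IDs. The settings-flipping idiom it uses twice can be factored as follows (a hypothetical helper, not part of the diff; IndexSettingsModule is the same test utility the hunk calls):

    // Sketch: derive IndexSettings from existing metadata with the
    // soft-deletes flag toggled, as done inline in the test above.
    static IndexSettings withSoftDeletes(IndexSettings base, boolean enabled) {
        return IndexSettingsModule.newIndexSettings(IndexMetaData.builder(base.getIndexMetaData())
            .settings(Settings.builder()
                .put(base.getSettings())
                .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), enabled))
            .build());
    }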
- store.createEmpty(); + store.createEmpty(Version.CURRENT.luceneVersion); try (ReadOnlyEngine readOnlyEngine = new ReadOnlyEngine(config, null , null, true, Function.identity())) { Class expectedException = LuceneTestCase.TEST_ASSERTS_ENABLED ? AssertionError.class : UnsupportedOperationException.class; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index b3bdd9f33cf..2ec49e5b204 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -1550,4 +1550,21 @@ public class DocumentParserTests extends ESSingleNodeTestCase { assertEquals("Could not dynamically add mapping for field [alias-field.dynamic-field]. " + "Existing mapping for [alias-field] must be of type object but found [alias].", exception.getMessage()); } + + public void testTypeless() throws IOException { + DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser(); + String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject().startObject("type").startObject("properties") + .startObject("foo").field("type", "keyword").endObject() + .endObject().endObject().endObject()); + DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping)); + + BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject() + .field("foo", "1234") + .endObject()); + + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "_doc", "1", bytes, XContentType.JSON)); + assertNull(doc.dynamicMappingsUpdate()); // no update since we reused the existing type + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 6d8f61b847f..d6803249c91 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -67,6 +67,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -80,8 +81,10 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.Engine.DeleteResult; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; @@ -733,7 +736,6 @@ public class IndexShardTests extends IndexShardTestCase { return fut.get(); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35850") public void testOperationPermitOnReplicaShards() throws Exception { final ShardId shardId = new ShardId("test", "_na_", 0); final IndexShard indexShard; @@ -1024,7 +1026,6 @@ public class IndexShardTests extends 
IndexShardTestCase { closeShards(replicaShard, primaryShard); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35850") public void testRestoreLocalHistoryFromTranslogOnPromotion() throws IOException, InterruptedException { final IndexShard indexShard = newStartedShard(false); final int operations = 1024 - scaledRandomIntBetween(0, 1024); @@ -1089,7 +1090,6 @@ public class IndexShardTests extends IndexShardTestCase { closeShard(indexShard, false); } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35850") public void testRollbackReplicaEngineOnPromotion() throws IOException, InterruptedException { final IndexShard indexShard = newStartedShard(false); @@ -1433,7 +1433,7 @@ public class IndexShardTests extends IndexShardTestCase { } long refreshCount = shard.refreshStats().getTotal(); indexDoc(shard, "_doc", "test"); - try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", "test", + try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "_doc", "test", new Term(IdFieldMapper.NAME, Uid.encodeId("test"))))) { assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+1)); } @@ -2133,7 +2133,7 @@ public class IndexShardTests extends IndexShardTestCase { shard.refresh("test"); try (Engine.GetResult getResult = shard - .get(new Engine.Get(false, false, "test", "1", + .get(new Engine.Get(false, false, "_doc", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) { assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); @@ -2175,7 +2175,7 @@ public class IndexShardTests extends IndexShardTestCase { assertEquals(search.totalHits.value, 1); } try (Engine.GetResult getResult = newShard - .get(new Engine.Get(false, false, "test", "1", + .get(new Engine.Get(false, false, "_doc", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) { assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader @@ -3600,11 +3600,125 @@ public class IndexShardTests extends IndexShardTestCase { closeShard(shard, false); } + public void testConcurrentAcquireAllReplicaOperationsPermitsWithPrimaryTermUpdate() throws Exception { + final IndexShard replica = newStartedShard(false); + indexOnReplicaWithGaps(replica, between(0, 1000), Math.toIntExact(replica.getLocalCheckpoint())); + + final int nbTermUpdates = randomIntBetween(1, 5); + + for (int i = 0; i < nbTermUpdates; i++) { + long opPrimaryTerm = replica.getOperationPrimaryTerm() + 1; + final long globalCheckpoint = replica.getGlobalCheckpoint(); + final long maxSeqNoOfUpdatesOrDeletes = replica.getMaxSeqNoOfUpdatesOrDeletes(); + + final int operations = scaledRandomIntBetween(5, 32); + final CyclicBarrier barrier = new CyclicBarrier(1 + operations); + final CountDownLatch latch = new CountDownLatch(operations); + + final Thread[] threads = new Thread[operations]; + for (int j = 0; j < operations; j++) { + threads[j] = new Thread(() -> { + try { + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException e) { + throw new RuntimeException(e); + } + replica.acquireAllReplicaOperationsPermits( + opPrimaryTerm, + globalCheckpoint, + maxSeqNoOfUpdatesOrDeletes, + new ActionListener() { + @Override + public void onResponse(final Releasable releasable) { + try (Releasable ignored = releasable) { + assertThat(replica.getPendingPrimaryTerm(), greaterThanOrEqualTo(opPrimaryTerm)); + assertThat(replica.getOperationPrimaryTerm(), equalTo(opPrimaryTerm)); + } finally { + latch.countDown(); + } + } + + @Override + 
public void onFailure(final Exception e) { + try { + throw new RuntimeException(e); + } finally { + latch.countDown(); + } + } + }, TimeValue.timeValueMinutes(30L)); + }); + threads[j].start(); + } + barrier.await(); + latch.await(); + + for (Thread thread : threads) { + thread.join(); + } + } + + closeShard(replica, false); + } + @Override public Settings threadPoolSettings() { return Settings.builder().put(super.threadPoolSettings()).put("thread_pool.estimated_time_interval", "5ms").build(); } + public void testTypelessDelete() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("index") + .putMapping("some_type", "{ \"properties\": {}}") + .settings(settings) + .build(); + IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoverShardFromStore(shard); + Engine.IndexResult indexResult = indexDoc(shard, "some_type", "id", "{}"); + assertTrue(indexResult.isCreated()); + + DeleteResult deleteResult = shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, "some_other_type", "id", VersionType.INTERNAL); + assertFalse(deleteResult.isFound()); + + deleteResult = shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, "_doc", "id", VersionType.INTERNAL); + assertTrue(deleteResult.isFound()); + + closeShards(shard); + } + + public void testTypelessGet() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("index") + .putMapping("some_type", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoverShardFromStore(shard); + Engine.IndexResult indexResult = indexDoc(shard, "some_type", "0", "{\"foo\" : \"bar\"}"); + assertTrue(indexResult.isCreated()); + + org.elasticsearch.index.engine.Engine.GetResult getResult = shard.get( + new Engine.Get(true, true, "some_type", "0", new Term("_id", Uid.encodeId("0")))); + assertTrue(getResult.exists()); + getResult.close(); + + getResult = shard.get(new Engine.Get(true, true, "some_other_type", "0", new Term("_id", Uid.encodeId("0")))); + assertFalse(getResult.exists()); + getResult.close(); + + getResult = shard.get(new Engine.Get(true, true, "_doc", "0", new Term("_id", Uid.encodeId("0")))); + assertTrue(getResult.exists()); + getResult.close(); + + closeShards(shard); + } + /** * Randomizes the usage of {@link IndexShard#acquireReplicaOperationPermit(long, long, long, ActionListener, String, Object)} and * {@link IndexShard#acquireAllReplicaOperationsPermits(long, long, long, ActionListener, TimeValue)} in order to acquire a permit. 
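Taken together, the typeless tests above pin down one resolution rule for get, getForUpdate, and delete: a request naming the concrete mapped type or the catch-all _doc finds the document, while any other type name behaves as a miss. A hypothetical one-liner expressing the asserted contract (not code from this diff):

    // The contract the typeless get/delete tests assert, given a document
    // indexed under mapping type "some_type":
    //   "some_type"       -> found
    //   "_doc"            -> found (typeless access)
    //   "some_other_type" -> not found
    static boolean typeMatches(String mappedType, String requestType) {
        return requestType.equals(mappedType) || "_doc".equals(requestType);
    }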
diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 25f6bb75cc8..35fbb94c997 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -26,6 +26,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; @@ -114,7 +115,7 @@ public class RefreshListenersTests extends ESTestCase { // we don't need to notify anybody in this test } }; - store.createEmpty(); + store.createEmpty(Version.CURRENT.luceneVersion); final long primaryTerm = randomNonNegativeLong(); final String translogUUID = Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java index b1a67472730..a7ea3904386 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -28,6 +28,7 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.Lock; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NativeFSLockFactory; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanation; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -99,6 +100,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36189") @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 0) public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase { diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index 04d15d39b58..7db904f89df 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.shard; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; @@ -77,4 +78,30 @@ public class ShardGetServiceTests extends IndexShardTestCase { closeShards(primary); } + + public void testTypelessGetForUpdate() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + 
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("index") + .putMapping("some_type", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoverShardFromStore(shard); + Engine.IndexResult indexResult = indexDoc(shard, "some_type", "0", "{\"foo\" : \"bar\"}"); + assertTrue(indexResult.isCreated()); + + GetResult getResult = shard.getService().getForUpdate("some_type", "0", Versions.MATCH_ANY, VersionType.INTERNAL); + assertTrue(getResult.isExists()); + + getResult = shard.getService().getForUpdate("some_other_type", "0", Versions.MATCH_ANY, VersionType.INTERNAL); + assertFalse(getResult.isExists()); + + getResult = shard.getService().getForUpdate("_doc", "0", Versions.MATCH_ANY, VersionType.INTERNAL); + assertTrue(getResult.isExists()); + + closeShards(shard); + } } diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index 82e0c88647e..d6690fd27cc 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -1035,7 +1035,7 @@ public class StoreTests extends ESTestCase { final ShardId shardId = new ShardId("index", "_na_", 1); try (Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId))) { - store.createEmpty(); + store.createEmpty(Version.LATEST); // remove the history uuid IndexWriterConfig iwc = new IndexWriterConfig(null) @@ -1067,7 +1067,7 @@ public class StoreTests extends ESTestCase { final ShardId shardId = new ShardId("index", "_na_", 1); try (Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId))) { - store.createEmpty(); + store.createEmpty(Version.LATEST); SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); assertThat(segmentInfos.getUserData(), hasKey(Engine.HISTORY_UUID_KEY)); diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestTermVectorsActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestTermVectorsActionTests.java deleted file mode 100644 index 88c867b0e56..00000000000 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestTermVectorsActionTests.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.document; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.rest.RestChannel; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestRequest.Method; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.rest.FakeRestChannel; -import org.elasticsearch.test.rest.FakeRestRequest; -import org.elasticsearch.usage.UsageService; - -import java.util.Collections; - -import static org.mockito.Mockito.mock; - -public class RestTermVectorsActionTests extends ESTestCase { - private RestController controller; - - public void setUp() throws Exception { - super.setUp(); - controller = new RestController(Collections.emptySet(), null, - mock(NodeClient.class), - new NoneCircuitBreakerService(), - new UsageService()); - new RestTermVectorsAction(Settings.EMPTY, controller); - } - - public void testDeprecatedEndpoint() { - RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) - .withMethod(Method.POST) - .withPath("/some_index/some_type/some_id/_termvector") - .build(); - - performRequest(request); - assertWarnings("[POST /{index}/{type}/{id}/_termvector] is deprecated! Use" + - " [POST /{index}/{type}/{id}/_termvectors] instead."); - } - - private void performRequest(RestRequest request) { - RestChannel channel = new FakeRestChannel(request, false, 1); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - controller.dispatchRequest(request, channel, threadContext); - } -} diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 45adc1149a3..30598311ad5 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -21,12 +21,14 @@ package org.elasticsearch.search; import com.carrotsearch.hppc.IntArrayList; import org.apache.lucene.search.Query; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; @@ -76,6 +78,7 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.LinkedList; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -417,6 +420,44 @@ public class SearchServiceTests extends ESSingleNodeTestCase { } } + /** + * test that creating more than the allowed number of scroll contexts throws an exception + */ + public void testMaxOpenScrollContexts() throws RuntimeException { + createIndex("index"); + client().prepareIndex("index", "type", "1").setSource("field", 
"value").setRefreshPolicy(IMMEDIATE).get(); + + final SearchService service = getInstanceFromNode(SearchService.class); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); + + // Open all possible scrolls, clear some of them, then open more until the limit is reached + LinkedList clearScrollIds = new LinkedList<>(); + + for (int i = 0; i < SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY); i++) { + SearchResponse searchResponse = client().prepareSearch("index").setSize(1).setScroll("1m").get(); + + if (randomInt(4) == 0) clearScrollIds.addLast(searchResponse.getScrollId()); + } + + ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); + clearScrollRequest.setScrollIds(clearScrollIds); + client().clearScroll(clearScrollRequest); + + for (int i = 0; i < clearScrollIds.size(); i++) { + client().prepareSearch("index").setSize(1).setScroll("1m").get(); + } + + ElasticsearchException ex = expectThrows(ElasticsearchException.class, + () -> service.createAndPutContext(new ShardScrollRequestTest(indexShard.shardId()))); + assertEquals( + "Trying to create too many scroll contexts. Must be less than or equal to: [" + + SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY) + "]. " + + "This limit can be set by changing the [search.max_open_scroll_context] setting.", + ex.getMessage()); + } + public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin { @Override public List> getQueries() { @@ -472,6 +513,22 @@ public class SearchServiceTests extends ESSingleNodeTestCase { } } + public static class ShardScrollRequestTest extends ShardSearchLocalRequest { + private Scroll scroll; + + ShardScrollRequestTest(ShardId shardId) { + super(shardId, 1, SearchType.DEFAULT, new SearchSourceBuilder(), + new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, true, null, null); + + this.scroll = new Scroll(TimeValue.timeValueMinutes(1)); + } + + @Override + public Scroll scroll() { + return this.scroll; + } + } + public void testCanMatch() throws IOException { createIndex("index"); final SearchService service = getInstanceFromNode(SearchService.class); diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java index 628c54468af..63e2c31ea3e 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -97,6 +97,16 @@ public class RepositoriesIT extends AbstractSnapshotIntegTestCase { assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue()); assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue()); + logger.info("--> check that trying to create a repository with the same settings repeatedly does not update cluster state"); + String beforeStateUuid = clusterStateResponse.getState().stateUUID(); + assertThat( + client.admin().cluster().preparePutRepository("test-repo-1") + .setType("fs").setSettings(Settings.builder() + .put("location", location) + ).get().isAcknowledged(), + equalTo(true)); + assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID()); + logger.info("--> delete repository test-repo-1"); 
client.admin().cluster().prepareDeleteRepository("test-repo-1").get(); repositoriesResponse = client.admin().cluster().prepareGetRepositories().get(); diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 12653cc6489..b6a28a9278a 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -64,7 +64,7 @@ thirdPartyAudit.excludes = [ task namingConventionsMain(type: org.elasticsearch.gradle.precommit.NamingConventionsTask) { checkForTestsInMain = true - javaHome = project.runtimeJavaHome + javaHome = project.compilerJavaHome } precommit.dependsOn namingConventionsMain diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 8158830e96f..513d76e2a31 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -51,6 +51,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -499,7 +500,7 @@ public abstract class EngineTestCase extends ESTestCase { final Store store = config.getStore(); final Directory directory = store.directory(); if (Lucene.indexExists(directory) == false) { - store.createEmpty(); + store.createEmpty(config.getIndexSettings().getIndexVersionCreated().luceneVersion); final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUuid); @@ -704,6 +705,32 @@ public abstract class EngineTestCase extends ESTestCase { return ops; } + public List generateReplicaHistory(int numOps, boolean allowGapInSeqNo) { + long seqNo = 0; + List operations = new ArrayList<>(numOps); + for (int i = 0; i < numOps; i++) { + String id = Integer.toString(between(1, 100)); + final ParsedDocument doc = EngineTestCase.createParsedDoc(id, null); + if (randomBoolean()) { + operations.add(new Engine.Index(EngineTestCase.newUid(doc), doc, seqNo, primaryTerm.get(), + i, null, Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(), + -1, true)); + } else if (randomBoolean()) { + operations.add(new Engine.Delete(doc.type(), doc.id(), EngineTestCase.newUid(doc), seqNo, primaryTerm.get(), + i, null, Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis())); + } else { + operations.add(new Engine.NoOp(seqNo, primaryTerm.get(), Engine.Operation.Origin.REPLICA, + threadPool.relativeTimeInMillis(), "test-" + i)); + } + seqNo++; + if (allowGapInSeqNo && rarely()) { + seqNo++; + } + } + Randomness.shuffle(operations); + return operations; + } + public static void assertOpsOnReplica( final List ops, final InternalEngine replicaEngine, @@ -788,14 +815,7 @@ public abstract class EngineTestCase extends ESTestCase { int docOffset; while ((docOffset = offset.incrementAndGet()) < ops.size()) { try { - final Engine.Operation op = ops.get(docOffset); - if (op instanceof Engine.Index) { - engine.index((Engine.Index) op); - } else if (op instanceof Engine.Delete){ - engine.delete((Engine.Delete) op); - } else { - engine.noOp((Engine.NoOp) 
op);
-                    }
+                    applyOperation(engine, ops.get(docOffset));
                     if ((docOffset + 1) % 4 == 0) {
                         engine.refresh("test");
                     }
@@ -814,6 +834,36 @@
         }
     }

+    public static void applyOperations(Engine engine, List<Engine.Operation> operations) throws IOException {
+        for (Engine.Operation operation : operations) {
+            applyOperation(engine, operation);
+            if (randomInt(100) < 10) {
+                engine.refresh("test");
+            }
+            if (rarely()) {
+                engine.flush();
+            }
+        }
+    }
+
+    public static Engine.Result applyOperation(Engine engine, Engine.Operation operation) throws IOException {
+        final Engine.Result result;
+        switch (operation.operationType()) {
+            case INDEX:
+                result = engine.index((Engine.Index) operation);
+                break;
+            case DELETE:
+                result = engine.delete((Engine.Delete) operation);
+                break;
+            case NO_OP:
+                result = engine.noOp((Engine.NoOp) operation);
+                break;
+            default:
+                throw new IllegalStateException("No operation defined for [" + operation + "]");
+        }
+        return result;
+    }
+
     /**
      * Gets a collection of tuples of docId, sequence number, and primary term of all live documents in the provided engine.
      */
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java
index 3985b90a71b..77ac94da4aa 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java
@@ -160,15 +160,22 @@ public final class CcrLicenseChecker {
             final ClusterStateRequest request,
             final Consumer<Exception> onFailure,
             final Consumer<ClusterState> leaderClusterStateConsumer) {
-        checkRemoteClusterLicenseAndFetchClusterState(
+        try {
+            Client remoteClient = systemClient(client.getRemoteClusterClient(clusterAlias));
+            checkRemoteClusterLicenseAndFetchClusterState(
                 client,
                 clusterAlias,
-                systemClient(client.getRemoteClusterClient(clusterAlias)),
+                remoteClient,
                 request,
                 onFailure,
                 leaderClusterStateConsumer,
                 CcrLicenseChecker::clusterStateNonCompliantRemoteLicense,
                 e -> clusterStateUnknownRemoteLicense(clusterAlias, e));
+        } catch (Exception e) {
+            // client.getRemoteClusterClient(...) can fail with an IllegalArgumentException if the remote
+            // connection is unknown
+            onFailure.accept(e);
+        }
     }

     /**
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java
index 0e86aa157ad..6bddedc0104 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java
@@ -403,6 +403,13 @@ public class AutoFollowCoordinator implements ClusterStateApplier {
         return currentState -> {
             AutoFollowMetadata currentAutoFollowMetadata = currentState.metaData().custom(AutoFollowMetadata.TYPE);
             Map<String, List<String>> newFollowedIndexUUIDS = new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs());
+            if (newFollowedIndexUUIDS.containsKey(name) == false) {
+                // A delete-auto-follow-pattern request can have removed the auto-follow pattern while we want to update
+                // the auto-follow metadata with the fact that an index was successfully auto-followed. If this
+                // happens, we can just skip this step.
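+                // Returning currentState unchanged makes this cluster state update a no-op, so no new state is published.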
+ return currentState; + } + newFollowedIndexUUIDS.compute(name, (key, existingUUIDs) -> { assert existingUUIDs != null; List newUUIDs = new ArrayList<>(existingUUIDs); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 4624a3622b9..2b7fee13502 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -40,8 +40,10 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; +import static org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower.recordLeaderIndexAsFollowFunction; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Matchers.anyString; @@ -384,6 +386,33 @@ public class AutoFollowCoordinatorTests extends ESTestCase { assertThat(result.get(1).getName(), equalTo("index2")); } + public void testRecordLeaderIndexAsFollowFunction() { + AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(Collections.emptyMap(), + Collections.singletonMap("pattern1", Collections.emptyList()), Collections.emptyMap()); + ClusterState clusterState = new ClusterState.Builder(new ClusterName("name")) + .metaData(new MetaData.Builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .build(); + Function function = recordLeaderIndexAsFollowFunction("pattern1", new Index("index1", "index1")); + + ClusterState result = function.apply(clusterState); + AutoFollowMetadata autoFollowMetadataResult = result.metaData().custom(AutoFollowMetadata.TYPE); + assertThat(autoFollowMetadataResult.getFollowedLeaderIndexUUIDs().get("pattern1"), notNullValue()); + assertThat(autoFollowMetadataResult.getFollowedLeaderIndexUUIDs().get("pattern1").size(), equalTo(1)); + assertThat(autoFollowMetadataResult.getFollowedLeaderIndexUUIDs().get("pattern1").get(0), equalTo("index1")); + } + + public void testRecordLeaderIndexAsFollowFunctionNoEntry() { + AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(Collections.emptyMap(), Collections.emptyMap(), + Collections.emptyMap()); + ClusterState clusterState = new ClusterState.Builder(new ClusterName("name")) + .metaData(new MetaData.Builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .build(); + Function function = recordLeaderIndexAsFollowFunction("pattern1", new Index("index1", "index1")); + + ClusterState result = function.apply(clusterState); + assertThat(result, sameInstance(clusterState)); + } + public void testGetFollowerIndexName() { AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), null, null, null, null, null, null, null, null, null, null, null); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index de5291f2b20..0719329ece4 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -280,7 +280,7 @@ public class FollowingEngineTests extends ESTestCase { } private FollowingEngine createEngine(Store store, EngineConfig config) throws IOException { - store.createEmpty(); + store.createEmpty(config.getIndexSettings().getIndexVersionCreated().luceneVersion); final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, 1L); store.associateIndexWithNewTranslog(translogUuid); @@ -485,7 +485,7 @@ public class FollowingEngineTests extends ESTestCase { IndexMetaData leaderIndexMetaData = IndexMetaData.builder(index.getName()).settings(leaderSettings).build(); IndexSettings leaderIndexSettings = new IndexSettings(leaderIndexMetaData, leaderSettings); try (Store leaderStore = createStore(shardId, leaderIndexSettings, newDirectory())) { - leaderStore.createEmpty(); + leaderStore.createEmpty(leaderIndexMetaData.getCreationVersion().luceneVersion); EngineConfig leaderConfig = engineConfig(shardId, leaderIndexSettings, threadPool, leaderStore, logger, xContentRegistry()); leaderStore.associateIndexWithNewTranslog(Translog.createEmptyTranslog( leaderConfig.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, 1L)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 29956fcc259..0525ca28b1b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -13,9 +13,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.CachedSupplier; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -31,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlStrings; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; +import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer; import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils; import java.io.IOException; @@ -43,6 +46,7 @@ import java.util.Map; import java.util.Objects; import java.util.Random; import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; /** * Datafeed configuration options. 
Describes where to proactively pull input
@@ -60,6 +64,45 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
     private static final int TWO_MINS_SECONDS = 2 * SECONDS_IN_MINUTE;
     private static final int TWENTY_MINS_SECONDS = 20 * SECONDS_IN_MINUTE;
     private static final int HALF_DAY_SECONDS = 12 * 60 * SECONDS_IN_MINUTE;
+    static final XContentObjectTransformer<QueryBuilder> QUERY_TRANSFORMER = XContentObjectTransformer.queryBuilderTransformer();
+    private static final BiFunction<Map<String, Object>, String, QueryBuilder> lazyQueryParser = (objectMap, id) -> {
+        try {
+            return QUERY_TRANSFORMER.fromMap(objectMap);
+        } catch (IOException | XContentParseException exception) {
+            // Certain thrown exceptions wrap the real IllegalArgumentException, making it hard for the user to determine the cause
+            if (exception.getCause() instanceof IllegalArgumentException) {
+                throw ExceptionsHelper.badRequestException(
+                    Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT,
+                        id,
+                        exception.getCause().getMessage()),
+                    exception.getCause());
+            } else {
+                throw ExceptionsHelper.badRequestException(
+                    Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, id, exception.getMessage()),
+                    exception);
+            }
+        }
+    };
+
+    static final XContentObjectTransformer<AggregatorFactories.Builder> AGG_TRANSFORMER = XContentObjectTransformer.aggregatorTransformer();
+    private static final BiFunction<Map<String, Object>, String, AggregatorFactories.Builder> lazyAggParser = (objectMap, id) -> {
+        try {
+            return AGG_TRANSFORMER.fromMap(objectMap);
+        } catch (IOException | XContentParseException exception) {
+            // Certain thrown exceptions wrap the real IllegalArgumentException, making it hard for the user to determine the cause
+            if (exception.getCause() instanceof IllegalArgumentException) {
+                throw ExceptionsHelper.badRequestException(
+                    Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT,
+                        id,
+                        exception.getCause().getMessage()),
+                    exception.getCause());
+            } else {
+                throw ExceptionsHelper.badRequestException(
+                    Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, id, exception.getMessage()),
+                    exception);
+            }
+        }
+    };

     // Used for QueryPage
     public static final ParseField RESULTS_FIELD = new ParseField("datafeeds");
@@ -90,6 +133,21 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
     public static final ObjectParser<Builder, Void> LENIENT_PARSER = createParser(true);
     public static final ObjectParser<Builder, Void> STRICT_PARSER = createParser(false);

+    public static void validateAggregations(AggregatorFactories.Builder aggregations) {
+        if (aggregations == null) {
+            return;
+        }
+        Collection<AggregationBuilder> aggregatorFactories = aggregations.getAggregatorFactories();
+        if (aggregatorFactories.isEmpty()) {
+            throw ExceptionsHelper.badRequestException(Messages.DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM);
+        }
+
+        AggregationBuilder histogramAggregation = ExtractorUtils.getHistogramAggregation(aggregatorFactories);
+        Builder.checkNoMoreHistogramAggregations(histogramAggregation.getSubAggregations());
+        Builder.checkHistogramAggregationHasChildMaxTimeAgg(histogramAggregation);
+        Builder.checkHistogramIntervalIsPositive(histogramAggregation);
+    }
+
     private static ObjectParser<Builder, Void> createParser(boolean ignoreUnknownFields) {
         ObjectParser<Builder, Void> parser = new ObjectParser<>("datafeed_config", ignoreUnknownFields, Builder::new);
@@ -102,9 +160,15 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
                 builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY);
         parser.declareString((builder, val) ->
                 builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY);
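Aside, not part of the diff: the two BiFunctions above are only ever invoked through CachedSupplier (see the constructor hunks below), so a stored map is parsed at most once per DatafeedConfig instance and a malformed map only fails on first access rather than at construction. A self-contained sketch of that pattern, reusing the queryBuilderTransformer() factory from this PR; the UncheckedIOException wrapper is illustrative, the real code throws ExceptionsHelper.badRequestException:

    import java.io.IOException;
    import java.io.UncheckedIOException;
    import java.util.Map;

    import org.elasticsearch.common.util.CachedSupplier;
    import org.elasticsearch.index.query.QueryBuilder;
    import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer;

    public class LazyQuerySketch {
        // Wraps a stored query map in a memoizing supplier: fromMap(...) runs on the
        // first get() only; later calls return the cached QueryBuilder instance.
        public static CachedSupplier<QueryBuilder> lazyQuery(Map<String, Object> queryMap) {
            XContentObjectTransformer<QueryBuilder> transformer = XContentObjectTransformer.queryBuilderTransformer();
            return new CachedSupplier<>(() -> {
                try {
                    return transformer.fromMap(queryMap);
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
    }

The hunk continues with the lenient/strict parser split, where only the lenient path stores raw maps: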
-        parser.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
-        parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS);
-        parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
+        if (ignoreUnknownFields) {
+            parser.declareObject(Builder::setQuery, (p, c) -> p.map(), QUERY);
+            parser.declareObject(Builder::setAggregations, (p, c) -> p.map(), AGGREGATIONS);
+            parser.declareObject(Builder::setAggregations, (p, c) -> p.map(), AGGS);
+        } else {
+            parser.declareObject(Builder::setParsedQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
+            parser.declareObject(Builder::setParsedAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS);
+            parser.declareObject(Builder::setParsedAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
+        }
         parser.declareObject(Builder::setScriptFields, (p, c) -> {
             List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
             while (p.nextToken() != XContentParser.Token.END_OBJECT) {
@@ -146,16 +210,18 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
     private final List<String> indices;
     private final List<String> types;
-    private final QueryBuilder query;
-    private final AggregatorFactories.Builder aggregations;
+    private final Map<String, Object> query;
+    private final Map<String, Object> aggregations;
     private final List<SearchSourceBuilder.ScriptField> scriptFields;
     private final Integer scrollSize;
     private final ChunkingConfig chunkingConfig;
     private final Map<String, String> headers;
     private final DelayedDataCheckConfig delayedDataCheckConfig;
+    private final CachedSupplier<QueryBuilder> querySupplier;
+    private final CachedSupplier<AggregatorFactories.Builder> aggSupplier;

     private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, List<String> types,
-                           QueryBuilder query, AggregatorFactories.Builder aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
+                           Map<String, Object> query, Map<String, Object> aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
                            Integer scrollSize, ChunkingConfig chunkingConfig, Map<String, String> headers, DelayedDataCheckConfig delayedDataCheckConfig) {
         this.id = id;
@@ -171,6 +237,8 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
         this.chunkingConfig = chunkingConfig;
         this.headers = Collections.unmodifiableMap(headers);
         this.delayedDataCheckConfig = delayedDataCheckConfig;
+        this.querySupplier = new CachedSupplier<>(() -> lazyQueryParser.apply(query, id));
+        this.aggSupplier = new CachedSupplier<>(() -> lazyAggParser.apply(aggregations, id));
     }

     public DatafeedConfig(StreamInput in) throws IOException {
@@ -188,8 +256,17 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
         } else {
             this.types = null;
         }
-        this.query = in.readNamedWriteable(QueryBuilder.class);
-        this.aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new);
+        if (in.getVersion().before(Version.V_6_6_0)) {
+            this.query = QUERY_TRANSFORMER.toMap(in.readNamedWriteable(QueryBuilder.class));
+            this.aggregations = AGG_TRANSFORMER.toMap(in.readOptionalWriteable(AggregatorFactories.Builder::new));
+        } else {
+            this.query = in.readMap();
+            if (in.readBoolean()) {
+                this.aggregations = in.readMap();
+            } else {
+                this.aggregations = null;
+            }
+        }
         if (in.readBoolean()) {
             this.scriptFields = Collections.unmodifiableList(in.readList(SearchSourceBuilder.ScriptField::new));
         } else {
@@ -207,6 +284,8 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
         } else {
             delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig();
         }
+        this.querySupplier = new CachedSupplier<>(() ->
lazyQueryParser.apply(query, id)); + this.aggSupplier = new CachedSupplier<>(() -> lazyAggParser.apply(aggregations, id)); } public String getId() { @@ -237,11 +316,19 @@ public class DatafeedConfig extends AbstractDiffable implements return scrollSize; } - public QueryBuilder getQuery() { + public QueryBuilder getParsedQuery() { + return querySupplier.get(); + } + + public Map getQuery() { return query; } - public AggregatorFactories.Builder getAggregations() { + public AggregatorFactories.Builder getParsedAggregations() { + return aggSupplier.get(); + } + + public Map getAggregations() { return aggregations; } @@ -249,14 +336,14 @@ public class DatafeedConfig extends AbstractDiffable implements * Returns the histogram's interval as epoch millis. */ public long getHistogramIntervalMillis() { - return ExtractorUtils.getHistogramIntervalMillis(aggregations); + return ExtractorUtils.getHistogramIntervalMillis(getParsedAggregations()); } /** * @return {@code true} when there are non-empty aggregations, {@code false} otherwise */ public boolean hasAggregations() { - return aggregations != null && aggregations.count() > 0; + return aggregations != null && aggregations.size() > 0; } public List getScriptFields() { @@ -293,8 +380,16 @@ public class DatafeedConfig extends AbstractDiffable implements } else { out.writeBoolean(false); } - out.writeNamedWriteable(query); - out.writeOptionalWriteable(aggregations); + if (out.getVersion().before(Version.V_6_6_0)) { + out.writeNamedWriteable(getParsedQuery()); + out.writeOptionalWriteable(getParsedAggregations()); + } else { + out.writeMap(query); + out.writeBoolean(aggregations != null); + if (aggregations != null) { + out.writeMap(aggregations); + } + } if (scriptFields != null) { out.writeBoolean(true); out.writeList(scriptFields); @@ -454,15 +549,20 @@ public class DatafeedConfig extends AbstractDiffable implements private TimeValue frequency; private List indices = Collections.emptyList(); private List types = Collections.emptyList(); - private QueryBuilder query = QueryBuilders.matchAllQuery(); - private AggregatorFactories.Builder aggregations; + private Map query; + private Map aggregations; private List scriptFields; private Integer scrollSize = DEFAULT_SCROLL_SIZE; private ChunkingConfig chunkingConfig; private Map headers = Collections.emptyMap(); private DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig(); + + public Builder() { + try { + this.query = QUERY_TRANSFORMER.toMap(QueryBuilders.matchAllQuery()); + } catch (IOException ex) { /*Should never happen*/ } } public Builder(String id, String jobId) { @@ -517,11 +617,47 @@ public class DatafeedConfig extends AbstractDiffable implements this.frequency = frequency; } - public void setQuery(QueryBuilder query) { + public void setParsedQuery(QueryBuilder query) { + try { + setQuery(QUERY_TRANSFORMER.toMap(ExceptionsHelper.requireNonNull(query, QUERY.getPreferredName()))); + } catch (IOException | XContentParseException exception) { + if (exception.getCause() instanceof IllegalArgumentException) { + // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, + id, + exception.getCause().getMessage()), + exception.getCause()); + } else { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, id, exception.getMessage()), exception); 
+ } + } + } + + void setQuery(Map query) { this.query = ExceptionsHelper.requireNonNull(query, QUERY.getPreferredName()); } - public void setAggregations(AggregatorFactories.Builder aggregations) { + public void setParsedAggregations(AggregatorFactories.Builder aggregations) { + try { + setAggregations(AGG_TRANSFORMER.toMap(aggregations)); + } catch (IOException | XContentParseException exception) { + // Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user + if (exception.getCause() instanceof IllegalArgumentException) { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, + id, + exception.getCause().getMessage()), + exception.getCause()); + } else { + throw ExceptionsHelper.badRequestException( + Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, id, exception.getMessage()), exception); + } + } + } + + void setAggregations(Map aggregations) { this.aggregations = aggregations; } @@ -564,30 +700,22 @@ public class DatafeedConfig extends AbstractDiffable implements throw invalidOptionValue(TYPES.getPreferredName(), types); } - validateAggregations(); + validateScriptFields(); setDefaultChunkingConfig(); + setDefaultQueryDelay(); return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize, chunkingConfig, headers, delayedDataCheckConfig); } - void validateAggregations() { + void validateScriptFields() { if (aggregations == null) { return; } if (scriptFields != null && !scriptFields.isEmpty()) { throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.DATAFEED_CONFIG_CANNOT_USE_SCRIPT_FIELDS_WITH_AGGS)); + Messages.getMessage(Messages.DATAFEED_CONFIG_CANNOT_USE_SCRIPT_FIELDS_WITH_AGGS)); } - Collection aggregatorFactories = aggregations.getAggregatorFactories(); - if (aggregatorFactories.isEmpty()) { - throw ExceptionsHelper.badRequestException(Messages.DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM); - } - - AggregationBuilder histogramAggregation = ExtractorUtils.getHistogramAggregation(aggregatorFactories); - checkNoMoreHistogramAggregations(histogramAggregation.getSubAggregations()); - checkHistogramAggregationHasChildMaxTimeAgg(histogramAggregation); - checkHistogramIntervalIsPositive(histogramAggregation); } private static void checkNoMoreHistogramAggregations(Collection aggregations) { @@ -630,7 +758,7 @@ public class DatafeedConfig extends AbstractDiffable implements if (aggregations == null) { chunkingConfig = ChunkingConfig.newAuto(); } else { - long histogramIntervalMillis = ExtractorUtils.getHistogramIntervalMillis(aggregations); + long histogramIntervalMillis = ExtractorUtils.getHistogramIntervalMillis(lazyAggParser.apply(aggregations, id)); chunkingConfig = ChunkingConfig.newManual(TimeValue.timeValueMillis( DEFAULT_AGGREGATION_CHUNKING_BUCKETS * histogramIntervalMillis)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index 25a97d081e6..14bfbea475f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -295,10 +295,11 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { builder.setTypes(types); } if (query != null) { - builder.setQuery(query); + 
builder.setParsedQuery(query); } if (aggregations != null) { - builder.setAggregations(aggregations); + DatafeedConfig.validateAggregations(aggregations); + builder.setParsedAggregations(aggregations); } if (scriptFields != null) { builder.setScriptFields(scriptFields); @@ -371,9 +372,9 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { && (queryDelay == null || Objects.equals(queryDelay, datafeed.getQueryDelay())) && (indices == null || Objects.equals(indices, datafeed.getIndices())) && (types == null || Objects.equals(types, datafeed.getTypes())) - && (query == null || Objects.equals(query, datafeed.getQuery())) + && (query == null || Objects.equals(query, datafeed.getParsedQuery())) && (scrollSize == null || Objects.equals(scrollSize, datafeed.getQueryDelay())) - && (aggregations == null || Objects.equals(aggregations, datafeed.getAggregations())) + && (aggregations == null || Objects.equals(aggregations, datafeed.getParsedAggregations())) && (scriptFields == null || Objects.equals(scriptFields, datafeed.getScriptFields())) && (delayedDataCheckConfig == null || Objects.equals(delayedDataCheckConfig, datafeed.getDelayedDataCheckConfig())) && (chunkingConfig == null || Objects.equals(chunkingConfig, datafeed.getChunkingConfig())); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index fcec1ff32f9..038b9a7a1ed 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -26,6 +26,8 @@ public final class Messages { "delayed_data_check_config: check_window [{0}] must be greater than the bucket_span [{1}]"; public static final String DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS = "delayed_data_check_config: check_window [{0}] must be less than 10,000x the bucket_span [{1}]"; + public static final String DATAFEED_CONFIG_QUERY_BAD_FORMAT = "Datafeed [{0}] query is not parsable: {1}"; + public static final String DATAFEED_CONFIG_AGG_BAD_FORMAT = "Datafeed [{0}] aggregations are not parsable: {1}"; public static final String DATAFEED_DOES_NOT_SUPPORT_JOB_WITH_LATENCY = "A job configured with datafeed cannot support latency"; public static final String DATAFEED_NOT_FOUND = "No datafeed with id [{0}] exists"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java index 00453d3680f..5d25b9d71e6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/XContentObjectTransformer.java @@ -61,6 +61,9 @@ public class XContentObjectTransformer { } public T fromMap(Map stringObjectMap) throws IOException { + if (stringObjectMap == null) { + return null; + } LoggingDeprecationAccumulationHandler deprecationLogger = new LoggingDeprecationAccumulationHandler(); try(XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(stringObjectMap); XContentParser parser = XContentType.JSON @@ -74,6 +77,9 @@ public class XContentObjectTransformer { } public Map toMap(T object) throws IOException { + if (object == null) { + return null; + } try(XContentBuilder xContentBuilder = 
XContentFactory.jsonBuilder()) { XContentBuilder content = object.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); return XContentHelper.convertToMap(BytesReference.bytes(content), true, XContentType.JSON).v2(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index fe7c5b1a1d1..2787f67952a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -67,7 +67,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase config.getParsedQuery()); + assertEquals("[match] query doesn't support multiple fields, found [query] and [type]", e.getMessage()); + } + + try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_QUERY_DATAFEED)) { + + XContentParseException e = expectThrows(XContentParseException.class, + () -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build()); + assertEquals("[6:25] [datafeed_config] failed to parse field [query]", e.getMessage()); + } + } + + public void testPastAggConfigParse() throws IOException { + try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_AGG_DATAFEED)) { + + DatafeedConfig.Builder configBuilder = DatafeedConfig.LENIENT_PARSER.apply(parser, null); + ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> configBuilder.build()); + assertEquals( + "Datafeed [farequote-datafeed] aggregations are not parsable: [size] must be greater than 0. 
Found [0] in [airline]", + e.getMessage()); + } + + try(XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_AGG_DATAFEED)) { + + XContentParseException e = expectThrows(XContentParseException.class, + () -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build()); + assertEquals("[8:25] [datafeed_config] failed to parse field [aggregations]", e.getMessage()); + } + } + public void testFutureMetadataParse() throws IOException { XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED); @@ -274,7 +349,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase0 for histogram aggregation [time]")); } public void testBuild_GivenDateHistogramWithInvalidTimeZone() { @@ -341,7 +416,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase createDatafeedWithDateHistogram((String) null)); - assertThat(e.getMessage(), equalTo("Aggregation interval must be greater than 0")); + assertThat(e.getMessage(), containsString("Aggregation interval must be greater than 0")); } public void testBuild_GivenValidDateHistogram() { @@ -402,9 +477,8 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase DatafeedConfig.validateAggregations(new AggregatorFactories.Builder().addAggregator(toplevelTerms))); assertEquals("Aggregations can only have 1 date_histogram or histogram aggregation", e.getMessage()); } @@ -520,7 +594,9 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase headers, ActionListener listener) { + DatafeedConfig.validateAggregations(request.getDatafeed().getParsedAggregations()); clusterService.submitStateUpdateTask( "put-datafeed-" + request.getDatafeed().getId(), new AckedClusterStateUpdateTask(request, listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 6d37b08a7e8..de0caee778e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -90,6 +90,7 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction listener) { final AggregationBuilder datafeedHistogramAggregation = getHistogramAggregation( - datafeed.getAggregations().getAggregatorFactories()); + datafeed.getParsedAggregations().getAggregatorFactories()); if ((datafeedHistogramAggregation instanceof DateHistogramAggregationBuilder) == false) { listener.onFailure( new IllegalArgumentException("Rollup requires that the datafeed configuration use a [date_histogram] aggregation," + @@ -104,7 +104,7 @@ public class RollupDataExtractorFactory implements DataExtractorFactory { return; } final List flattenedAggs = new ArrayList<>(); - flattenAggregations(datafeed.getAggregations().getAggregatorFactories(), datafeedHistogramAggregation, flattenedAggs); + flattenAggregations(datafeed.getParsedAggregations().getAggregatorFactories(), datafeedHistogramAggregation, flattenedAggs); if (validIntervalCaps.stream().noneMatch(rollupJobConfig -> hasAggregations(rollupJobConfig, flattenedAggs))) { listener.onFailure( diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java index 67079cf2e67..68161507ed7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactory.java @@ -36,7 +36,7 @@ public class ChunkedDataExtractorFactory implements DataExtractorFactory { job.getDataDescription().getTimeField(), datafeedConfig.getIndices(), datafeedConfig.getTypes(), - datafeedConfig.getQuery(), + datafeedConfig.getParsedQuery(), datafeedConfig.getScrollSize(), timeAligner.alignToCeil(start), timeAligner.alignToFloor(end), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java index 67689bd51b8..986387c2ed8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java @@ -44,7 +44,7 @@ public class ScrollDataExtractorFactory implements DataExtractorFactory { extractedFields, datafeedConfig.getIndices(), datafeedConfig.getTypes(), - datafeedConfig.getQuery(), + datafeedConfig.getParsedQuery(), datafeedConfig.getScriptFields(), datafeedConfig.getScrollSize(), start, diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java index 50a016f6e5e..b2f11075907 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java @@ -81,7 +81,7 @@ public class TransportPreviewDatafeedActionTests extends ESTestCase { DatafeedConfig.Builder datafeed = new DatafeedConfig.Builder("no_aggs_feed", "job_foo"); datafeed.setIndices(Collections.singletonList("my_index")); MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); - datafeed.setAggregations(AggregatorFactories.builder().addAggregator( + datafeed.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.histogram("time").interval(300000).subAggregation(maxTime).field("time"))); datafeed.setChunkingConfig(ChunkingConfig.newManual(TimeValue.timeValueHours(1))); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java index 1507e106c61..4007671bbbc 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobValidatorTests.java @@ -222,7 +222,7 @@ public class DatafeedJobValidatorTests extends ESTestCase { HistogramAggregationBuilder histogram = AggregationBuilders.histogram("time").interval(interval).field("time").subAggregation(maxTime); DatafeedConfig.Builder datafeedConfig = 
createValidDatafeedConfig(); - datafeedConfig.setAggregations(new AggregatorFactories.Builder().addAggregator(histogram)); + datafeedConfig.setParsedAggregations(new AggregatorFactories.Builder().addAggregator(histogram)); return datafeedConfig; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java index 9e229e2b057..1478a485cc4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java @@ -143,7 +143,7 @@ public class DataExtractorFactoryTests extends ESTestCase { jobBuilder.setDataDescription(dataDescription); DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo"); MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); - datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + datafeedConfig.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.histogram("time").interval(300000).subAggregation(maxTime).field("time"))); ActionListener listener = ActionListener.wrap( @@ -162,7 +162,7 @@ public class DataExtractorFactoryTests extends ESTestCase { DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo"); datafeedConfig.setChunkingConfig(ChunkingConfig.newOff()); MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); - datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + datafeedConfig.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.histogram("time").interval(300000).subAggregation(maxTime).field("time"))); ActionListener listener = ActionListener.wrap( @@ -180,7 +180,7 @@ public class DataExtractorFactoryTests extends ESTestCase { jobBuilder.setDataDescription(dataDescription); DatafeedConfig.Builder datafeedConfig = DatafeedManagerTests.createDatafeedConfig("datafeed1", "foo"); MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); - datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + datafeedConfig.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.histogram("time").interval(300000).subAggregation(maxTime).field("time"))); datafeedConfig.setChunkingConfig(ChunkingConfig.newAuto()); @@ -203,7 +203,7 @@ public class DataExtractorFactoryTests extends ESTestCase { MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); MaxAggregationBuilder myField = AggregationBuilders.max("myField").field("myField"); TermsAggregationBuilder myTerm = AggregationBuilders.terms("termAgg").field("termField").subAggregation(myField); - datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + datafeedConfig.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); ActionListener listener = ActionListener.wrap( dataExtractorFactory -> assertThat(dataExtractorFactory, instanceOf(RollupDataExtractorFactory.class)), @@ -223,7 +223,7 @@ public class DataExtractorFactoryTests extends ESTestCase { MaxAggregationBuilder maxTime = 
AggregationBuilders.max("time").field("time"); MaxAggregationBuilder myField = AggregationBuilders.max("myField").field("myField"); TermsAggregationBuilder myTerm = AggregationBuilders.terms("termAgg").field("termField").subAggregation(myField); - datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + datafeedConfig.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); ActionListener listener = ActionListener.wrap( dataExtractorFactory -> assertThat(dataExtractorFactory, instanceOf(ChunkedDataExtractorFactory.class)), @@ -263,7 +263,7 @@ public class DataExtractorFactoryTests extends ESTestCase { MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); MaxAggregationBuilder myField = AggregationBuilders.max("myField").field("myField"); TermsAggregationBuilder myTerm = AggregationBuilders.terms("termAgg").field("termField").subAggregation(myField); - datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + datafeedConfig.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); ActionListener listener = ActionListener.wrap( dataExtractorFactory -> fail(), @@ -288,7 +288,7 @@ public class DataExtractorFactoryTests extends ESTestCase { MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); MaxAggregationBuilder myField = AggregationBuilders.max("myField").field("myField"); TermsAggregationBuilder myTerm = AggregationBuilders.terms("termAgg").field("termField").subAggregation(myField); - datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + datafeedConfig.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); ActionListener listener = ActionListener.wrap( dataExtractorFactory -> fail(), @@ -312,7 +312,7 @@ public class DataExtractorFactoryTests extends ESTestCase { MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); MaxAggregationBuilder myField = AggregationBuilders.max("myField").field("otherField"); TermsAggregationBuilder myTerm = AggregationBuilders.terms("termAgg").field("termField").subAggregation(myField); - datafeedConfig.setAggregations(AggregatorFactories.builder().addAggregator( + datafeedConfig.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); ActionListener listener = ActionListener.wrap( dataExtractorFactory -> fail(), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java index 8f4aad57c3f..c9a2e8712e2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractorFactoryTests.java @@ -64,8 +64,8 @@ public class AggregationDataExtractorFactoryTests extends ESTestCase { 
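Aside on the rename that repeats through these ML test hunks: setParsedAggregations(...) is the typed entry point, which per the DatafeedConfig hunks earlier converts the AggregatorFactories.Builder to a map via AGG_TRANSFORMER.toMap and stores only the map, while setQuery/setAggregations now take the raw Map form. A sketch of building a config through the typed path; the feed, job, and index names and the interval are illustrative, and the aggregation shape mirrors the tests above:

    import java.util.Collections;

    import org.elasticsearch.search.aggregations.AggregationBuilders;
    import org.elasticsearch.search.aggregations.AggregatorFactories;
    import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;

    public class DatafeedBuilderSketch {
        public static DatafeedConfig build() {
            DatafeedConfig.Builder builder = new DatafeedConfig.Builder("feed-1", "job-1");
            builder.setIndices(Collections.singletonList("my_index"));
            // Typed path: serialized to Map<String, Object> internally, parsed back lazily
            // (and memoized) on the first getParsedAggregations() call.
            builder.setParsedAggregations(AggregatorFactories.builder().addAggregator(
                    AggregationBuilders.dateHistogram("time").interval(300_000)
                            .subAggregation(AggregationBuilders.max("time").field("time")).field("time")));
            return builder.build();
        }
    }
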
jobBuilder.setDataDescription(dataDescription); jobBuilder.setAnalysisConfig(analysisConfig); DatafeedConfig.Builder datafeedConfigBuilder = new DatafeedConfig.Builder("foo-feed", jobBuilder.getId()); - datafeedConfigBuilder.setAggregations(aggs); + datafeedConfigBuilder.setParsedAggregations(aggs); datafeedConfigBuilder.setIndices(Arrays.asList("my_index")); return new AggregationDataExtractorFactory(client, datafeedConfigBuilder.build(), jobBuilder.build(new Date())); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java index 3dc2364cc2a..77a8c936beb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorFactoryTests.java @@ -91,8 +91,8 @@ public class ChunkedDataExtractorFactoryTests extends ESTestCase { jobBuilder.setDataDescription(dataDescription); jobBuilder.setAnalysisConfig(analysisConfig); DatafeedConfig.Builder datafeedConfigBuilder = new DatafeedConfig.Builder("foo-feed", jobBuilder.getId()); - datafeedConfigBuilder.setAggregations(aggs); + datafeedConfigBuilder.setParsedAggregations(aggs); datafeedConfigBuilder.setIndices(Arrays.asList("my_index")); return new ChunkedDataExtractorFactory(client, datafeedConfigBuilder.build(), jobBuilder.build(new Date()), dataExtractorFactory); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java index c86db02ca80..0ef76131bd6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java @@ -98,7 +98,7 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase { HistogramAggregationBuilder histogramAggregation = AggregationBuilders.histogram("time").interval(60000) .subAggregation(maxAggregation).field("time"); - configBuilder.setAggregations(AggregatorFactories.builder().addAggregator(histogramAggregation)); + configBuilder.setParsedAggregations(AggregatorFactories.builder().addAggregator(histogramAggregation)); configBuilder.setFrequency(TimeValue.timeValueMinutes(2)); DatafeedConfig config = configBuilder.build(); PutDatafeedAction.Request putDatafeedRequest = new PutDatafeedAction.Request(config); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java index 06751e97ab7..914a029c0c4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java @@ -388,7 +388,7 @@ public class IndexAuditTrail implements AuditTrail, ClusterStateListener { indices.stream().map(imd -> imd.getIndex().getName()).collect(Collectors.toList())); } IndexMetaData indexMetaData = indices.get(0); - MappingMetaData docMapping = indexMetaData.mapping("doc"); + 
MappingMetaData docMapping = indexMetaData.getMappings().get("doc"); if (docMapping == null) { if (indexToRemoteCluster || state.nodes().isLocalNodeElectedMaster() || hasStaleMessage()) { putAuditIndexMappingsAndStart(index); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java index ed82808af76..f1f3993261e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/IndexPrivilegeTests.java @@ -492,13 +492,13 @@ public class IndexPrivilegeTests extends AbstractPrivilegeTestCase { assertAccessIsAllowed("admin", "GET", "/" + index + "/_search"); assertAccessIsAllowed("admin", "GET", "/" + index + "/foo/1"); assertAccessIsAllowed(user, "GET", "/" + index + "/foo/1/_explain", "{ \"query\" : { \"match_all\" : {} } }"); - assertAccessIsAllowed(user, "GET", "/" + index + "/foo/1/_termvector"); + assertAccessIsAllowed(user, "GET", "/" + index + "/foo/1/_termvectors"); assertUserIsAllowed(user, "search", index); } else { assertAccessIsDenied(user, "GET", "/" + index + "/_count"); assertAccessIsDenied(user, "GET", "/" + index + "/_search"); assertAccessIsDenied(user, "GET", "/" + index + "/foo/1/_explain", "{ \"query\" : { \"match_all\" : {} } }"); - assertAccessIsDenied(user, "GET", "/" + index + "/foo/1/_termvector"); + assertAccessIsDenied(user, "GET", "/" + index + "/foo/1/_termvectors"); assertUserIsDenied(user, "search", index); } break; diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java index 4ba3875cac0..1538f5302d6 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java @@ -59,11 +59,9 @@ public class UserFunctionIT extends ESRestTestCase { private void setUpUsers() throws IOException { int usersCount = name.getMethodName().startsWith("testSingle") ? 1 : randomIntBetween(5, 15); users = new ArrayList(usersCount); - - for(int i = 0; i < usersCount; i++) { - String randomUserName = randomAlphaOfLengthBetween(1, 15); - users.add(randomUserName); - createUser(randomUserName, MINIMAL_ACCESS_ROLE); + users.addAll(randomUnique(() -> randomAlphaOfLengthBetween(1, 15), usersCount)); + for (String user : users) { + createUser(user, MINIMAL_ACCESS_ROLE); } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.remove_policy.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.remove_policy.json index de3591d6026..d9903ff8dc4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.remove_policy.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ilm.remove_policy.json @@ -4,7 +4,7 @@ "methods": [ "POST" ], "url": { "path": "/{index}/_ilm/remove", - "paths": ["/{index}/_ilm/remove", "/_ilm/remove"], + "paths": ["/{index}/_ilm/remove"], "parts": { "index": { "type" : "string",