Merge remote-tracking branch 'elastic/master' into zen2
Commit: 70c361ea5a
@@ -164,6 +164,10 @@ if (project != rootProject) {
  apply plugin: 'nebula.maven-base-publish'
  apply plugin: 'nebula.maven-scm'

  // we need to apply these again to override the build plugin
  targetCompatibility = "10"
  sourceCompatibility = "10"

  // groovydoc succeeds, but has some weird internal exception...
  groovydoc.enabled = false

@@ -217,7 +217,7 @@ class PrecommitTasks {
    private static Task configureNamingConventions(Project project) {
        if (project.sourceSets.findByName("test")) {
            Task namingConventionsTask = project.tasks.create('namingConventions', NamingConventionsTask)
            namingConventionsTask.javaHome = project.runtimeJavaHome
            namingConventionsTask.javaHome = project.compilerJavaHome
            return namingConventionsTask
        }
        return null

@@ -41,7 +41,7 @@ import java.io.File;
 */
public class GradleServicesAdapter {

    public final Project project;
    private final Project project;

    public GradleServicesAdapter(Project project) {
        this.project = project;

@@ -20,17 +20,23 @@ package org.elasticsearch.gradle;

public enum Distribution {

    INTEG_TEST("integ-test"),
    ZIP("elasticsearch"),
    ZIP_OSS("elasticsearch-oss");
    INTEG_TEST("integ-test", "zip"),
    ZIP("elasticsearch", "zip"),
    ZIP_OSS("elasticsearch-oss", "zip");

    private final String fileName;
    private final String fileExtension;

    Distribution(String name) {
    Distribution(String name, String fileExtension) {
        this.fileName = name;
        this.fileExtension = fileExtension;
    }

    public String getFileName() {
        return fileName;
    }

    public String getFileExtension() {
        return fileExtension;
    }
}

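For illustration (not part of this diff): the new fileExtension field lets callers derive both the artifact name and the directory it is extracted into. A minimal sketch, assuming the <extractDir>/<extension>/<fileName>-<version> layout that ElasticsearchNode.start() uses later in this commit; DistroLocator is a hypothetical name.

import java.io.File;

class DistroLocator {
    // Resolve the extracted distribution directory, e.g. <extractDir>/zip/elasticsearch-oss-7.0.0
    static File locate(File artifactsExtractDir, Distribution distribution, String version) {
        return new File(
            new File(artifactsExtractDir, distribution.getFileExtension()),
            distribution.getFileName() + "-" + version
        );
    }
}
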
@@ -20,25 +20,67 @@ package org.elasticsearch.gradle.testclusters;
|
|||
|
||||
import org.elasticsearch.GradleServicesAdapter;
|
||||
import org.elasticsearch.gradle.Distribution;
|
||||
import org.elasticsearch.gradle.Version;
|
||||
import org.gradle.api.logging.Logger;
|
||||
import org.gradle.api.logging.Logging;
|
||||
import org.gradle.internal.os.OperatingSystem;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
import java.io.FileReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.net.HttpURLConnection;
|
||||
import java.net.URL;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.util.Arrays;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Objects.requireNonNull;
|
||||
import static java.util.concurrent.TimeUnit.MILLISECONDS;
|
||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||
|
||||
public class ElasticsearchNode {
|
||||
|
||||
private final Logger logger = Logging.getLogger(ElasticsearchNode.class);
|
||||
private final String name;
|
||||
private final GradleServicesAdapter services;
|
||||
private final AtomicBoolean configurationFrozen = new AtomicBoolean(false);
|
||||
private final Logger logger = Logging.getLogger(ElasticsearchNode.class);
|
||||
private final File artifactsExtractDir;
|
||||
private final File workingDir;
|
||||
|
||||
private static final int ES_DESTROY_TIMEOUT = 20;
|
||||
private static final TimeUnit ES_DESTROY_TIMEOUT_UNIT = TimeUnit.SECONDS;
|
||||
private static final int NODE_UP_TIMEOUT = 30;
|
||||
private static final TimeUnit NODE_UP_TIMEOUT_UNIT = TimeUnit.SECONDS;
|
||||
private final LinkedHashMap<String, Predicate<ElasticsearchNode>> waitConditions;
|
||||
|
||||
private Distribution distribution;
|
||||
private String version;
|
||||
private File javaHome;
|
||||
private volatile Process esProcess;
|
||||
private final String path;
|
||||
|
||||
public ElasticsearchNode(String name, GradleServicesAdapter services) {
|
||||
ElasticsearchNode(String path, String name, GradleServicesAdapter services, File artifactsExtractDir, File workingDirBase) {
|
||||
this.path = path;
|
||||
this.name = name;
|
||||
this.services = services;
|
||||
this.artifactsExtractDir = artifactsExtractDir;
|
||||
this.workingDir = new File(workingDirBase, safeName(name));
|
||||
this.waitConditions = new LinkedHashMap<>();
|
||||
waitConditions.put("http ports file", node -> node.getHttpPortsFile().exists());
|
||||
waitConditions.put("transport ports file", node -> node.getTransportPortFile().exists());
|
||||
waitForUri("cluster health yellow", "/_cluster/health?wait_for_nodes=>=1&wait_for_status=yellow");
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
|
@@ -50,6 +92,7 @@ public class ElasticsearchNode {
|
|||
}
|
||||
|
||||
public void setVersion(String version) {
|
||||
requireNonNull(version, "null version passed when configuring test cluster `" + this + "`");
|
||||
checkFrozen();
|
||||
this.version = version;
|
||||
}
|
||||
|
@@ -59,22 +102,258 @@ public class ElasticsearchNode {
|
|||
}
|
||||
|
||||
public void setDistribution(Distribution distribution) {
|
||||
requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`");
|
||||
checkFrozen();
|
||||
this.distribution = distribution;
|
||||
}
|
||||
|
||||
void start() {
|
||||
logger.info("Starting `{}`", this);
|
||||
}
|
||||
|
||||
void stop(boolean tailLogs) {
|
||||
logger.info("Stopping `{}`, tailLogs: {}", this, tailLogs);
|
||||
}
|
||||
|
||||
public void freeze() {
|
||||
requireNonNull(distribution, "null distribution passed when configuring test cluster `" + this + "`");
|
||||
requireNonNull(version, "null version passed when configuring test cluster `" + this + "`");
|
||||
logger.info("Locking configuration of `{}`", this);
|
||||
configurationFrozen.set(true);
|
||||
Objects.requireNonNull(version, "Version of test cluster `" + this + "` can't be null");
|
||||
}
|
||||
|
||||
public void setJavaHome(File javaHome) {
|
||||
requireNonNull(javaHome, "null javaHome passed when configuring test cluster `" + this + "`");
|
||||
checkFrozen();
|
||||
if (javaHome.exists() == false) {
|
||||
throw new TestClustersException("java home for `" + this + "` does not exist: `" + javaHome + "`");
|
||||
}
|
||||
this.javaHome = javaHome;
|
||||
}
|
||||
|
||||
public File getJavaHome() {
|
||||
return javaHome;
|
||||
}
|
||||
|
||||
private void waitForUri(String description, String uri) {
|
||||
waitConditions.put(description, (node) -> {
|
||||
try {
|
||||
URL url = new URL("http://" + this.getHttpPortInternal().get(0) + uri);
|
||||
HttpURLConnection con = (HttpURLConnection) url.openConnection();
|
||||
con.setRequestMethod("GET");
|
||||
con.setConnectTimeout(500);
|
||||
con.setReadTimeout(500);
|
||||
try (BufferedReader reader = new BufferedReader(new InputStreamReader(con.getInputStream()))) {
|
||||
String response = reader.lines().collect(Collectors.joining("\n"));
|
||||
logger.info("{} -> {} ->\n{}", this, uri, response);
|
||||
}
|
||||
return true;
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException("Connection attempt to " + this + " failed", e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
synchronized void start() {
|
||||
logger.info("Starting `{}`", this);
|
||||
|
||||
File distroArtifact = new File(
|
||||
new File(artifactsExtractDir, distribution.getFileExtension()),
|
||||
distribution.getFileName() + "-" + getVersion()
|
||||
);
|
||||
if (distroArtifact.exists() == false) {
|
||||
throw new TestClustersException("Can not start " + this + ", missing: " + distroArtifact);
|
||||
}
|
||||
if (distroArtifact.isDirectory() == false) {
|
||||
throw new TestClustersException("Can not start " + this + ", is not a directory: " + distroArtifact);
|
||||
}
|
||||
services.sync(spec -> {
|
||||
spec.from(new File(distroArtifact, "config"));
|
||||
spec.into(getConfigFile().getParent());
|
||||
});
|
||||
configure();
|
||||
startElasticsearchProcess(distroArtifact);
|
||||
}
|
||||
|
||||
private void startElasticsearchProcess(File distroArtifact) {
|
||||
logger.info("Running `bin/elasticsearch` in `{}` for {}", workingDir, this);
|
||||
final ProcessBuilder processBuilder = new ProcessBuilder();
|
||||
if (OperatingSystem.current().isWindows()) {
|
||||
processBuilder.command(
|
||||
"cmd", "/c",
|
||||
new File(distroArtifact, "\\bin\\elasticsearch.bat").getAbsolutePath()
|
||||
);
|
||||
} else {
|
||||
processBuilder.command(
|
||||
new File(distroArtifact.getAbsolutePath(), "bin/elasticsearch").getAbsolutePath()
|
||||
);
|
||||
}
|
||||
try {
|
||||
processBuilder.directory(workingDir);
|
||||
Map<String, String> environment = processBuilder.environment();
|
||||
// Don't inherit anything from the environment, as that would hurt reproducibility
|
||||
environment.clear();
|
||||
if (javaHome != null) {
|
||||
environment.put("JAVA_HOME", getJavaHome().getAbsolutePath());
|
||||
} else if (System.getenv().get("JAVA_HOME") != null) {
|
||||
logger.warn("{}: No java home configured, will use it from the environment: {}",
|
||||
this, System.getenv().get("JAVA_HOME")
|
||||
);
|
||||
environment.put("JAVA_HOME", System.getenv().get("JAVA_HOME"));
|
||||
} else {
|
||||
logger.warn("{}: No javaHome configured, will rely on default java detection", this);
|
||||
}
|
||||
environment.put("ES_PATH_CONF", getConfigFile().getParentFile().getAbsolutePath());
|
||||
environment.put("ES_JAVA_OPTIONS", "-Xms512m -Xmx512m");
|
||||
// don't buffer all in memory, make sure we don't block on the default pipes
|
||||
processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(getStdErrFile()));
|
||||
processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(getStdoutFile()));
|
||||
esProcess = processBuilder.start();
|
||||
} catch (IOException e) {
|
||||
throw new TestClustersException("Failed to start ES process for " + this, e);
|
||||
}
|
||||
}
|
||||
|
||||
public String getHttpSocketURI() {
|
||||
waitForAllConditions();
|
||||
return getHttpPortInternal().get(0);
|
||||
}
|
||||
|
||||
public String getTransportPortURI() {
|
||||
waitForAllConditions();
|
||||
return getTransportPortInternal().get(0);
|
||||
}
|
||||
|
||||
synchronized void stop(boolean tailLogs) {
|
||||
if (esProcess == null && tailLogs) {
|
||||
// This is a special case. If start() throws an exception the plugin will still call stop
|
||||
// Another exception here would eat the original.
|
||||
return;
|
||||
}
|
||||
logger.info("Stopping `{}`, tailLogs: {}", this, tailLogs);
|
||||
requireNonNull(esProcess, "Can't stop `" + this + "` as it was not started or already stopped.");
|
||||
stopHandle(esProcess.toHandle());
|
||||
if (tailLogs) {
|
||||
logFileContents("Standard output of node", getStdoutFile());
|
||||
logFileContents("Standard error of node", getStdErrFile());
|
||||
}
|
||||
esProcess = null;
|
||||
}
|
||||
|
||||
private void stopHandle(ProcessHandle processHandle) {
|
||||
// Stop all children first, ES could actually be a child when there's some wrapper process like on Windows.
|
||||
if (processHandle.isAlive()) {
|
||||
processHandle.children().forEach(this::stopHandle);
|
||||
}
|
||||
logProcessInfo("Terminating elasticsearch process:", processHandle.info());
|
||||
if (processHandle.isAlive()) {
|
||||
processHandle.destroy();
|
||||
} else {
|
||||
logger.info("Process was not running when we tried to terminate it.");
|
||||
}
|
||||
waitForProcessToExit(processHandle);
|
||||
if (processHandle.isAlive()) {
|
||||
logger.info("process did not terminate after {} {}, stopping it forcefully",
|
||||
ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT
|
||||
);
|
||||
processHandle.destroyForcibly();
|
||||
}
|
||||
waitForProcessToExit(processHandle);
|
||||
if (processHandle.isAlive()) {
|
||||
throw new TestClustersException("Was not able to terminate es process");
|
||||
}
|
||||
}
|
||||
|
||||
private void logProcessInfo(String prefix, ProcessHandle.Info info) {
|
||||
logger.info(prefix + " commandLine:`{}` command:`{}` args:`{}`",
|
||||
info.commandLine().orElse("-"), info.command().orElse("-"),
|
||||
Arrays.stream(info.arguments().orElse(new String[]{}))
|
||||
.map(each -> "'" + each + "'")
|
||||
.collect(Collectors.joining(" "))
|
||||
);
|
||||
}
|
||||
|
||||
private void logFileContents(String description, File from) {
|
||||
logger.error("{} `{}`", description, this);
|
||||
try (BufferedReader reader = new BufferedReader(new FileReader(from))) {
|
||||
reader.lines()
|
||||
.map(line -> " [" + name + "]" + line)
|
||||
.forEach(logger::error);
|
||||
} catch (IOException e) {
|
||||
throw new TestClustersException("Error reading " + description, e);
|
||||
}
|
||||
}
|
||||
|
||||
private void waitForProcessToExit(ProcessHandle processHandle) {
|
||||
try {
|
||||
processHandle.onExit().get(ES_DESTROY_TIMEOUT, ES_DESTROY_TIMEOUT_UNIT);
|
||||
} catch (InterruptedException e) {
|
||||
logger.info("Interrupted while waiting for ES process", e);
|
||||
Thread.currentThread().interrupt();
|
||||
} catch (ExecutionException e) {
|
||||
logger.info("Failure while waiting for process to exit", e);
|
||||
} catch (TimeoutException e) {
|
||||
logger.info("Timed out waiting for process to exit", e);
|
||||
}
|
||||
}
|
||||
|
||||
private File getConfigFile() {
|
||||
return new File(workingDir, "config/elasticsearch.yml");
|
||||
}
|
||||
|
||||
private File getConfPathData() {
|
||||
return new File(workingDir, "data");
|
||||
}
|
||||
|
||||
private File getConfPathSharedData() {
|
||||
return new File(workingDir, "sharedData");
|
||||
}
|
||||
|
||||
private File getConfPathRepo() {
|
||||
return new File(workingDir, "repo");
|
||||
}
|
||||
|
||||
private File getConfPathLogs() {
|
||||
return new File(workingDir, "logs");
|
||||
}
|
||||
|
||||
private File getStdoutFile() {
|
||||
return new File(getConfPathLogs(), "es.stdout.log");
|
||||
}
|
||||
|
||||
private File getStdErrFile() {
|
||||
return new File(getConfPathLogs(), "es.stderr.log");
|
||||
}
|
||||
|
||||
private void configure() {
|
||||
getConfigFile().getParentFile().mkdirs();
|
||||
getConfPathRepo().mkdirs();
|
||||
getConfPathData().mkdirs();
|
||||
getConfPathSharedData().mkdirs();
|
||||
getConfPathLogs().mkdirs();
|
||||
LinkedHashMap<String, String> config = new LinkedHashMap<>();
|
||||
config.put("cluster.name", "cluster-" + safeName(name));
|
||||
config.put("node.name", "node-" + safeName(name));
|
||||
config.put("path.repo", getConfPathRepo().getAbsolutePath());
|
||||
config.put("path.data", getConfPathData().getAbsolutePath());
|
||||
config.put("path.logs", getConfPathLogs().getAbsolutePath());
|
||||
config.put("path.shared_data", getConfPathSharedData().getAbsolutePath());
|
||||
config.put("node.attr.testattr", "test");
|
||||
config.put("node.portsfile", "true");
|
||||
config.put("http.port", "0");
|
||||
config.put("transport.tcp.port", "0");
|
||||
// Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
|
||||
config.put("cluster.routing.allocation.disk.watermark.low", "1b");
|
||||
config.put("cluster.routing.allocation.disk.watermark.high", "1b");
|
||||
// increase script compilation limit since tests can rapid-fire script compilations
|
||||
config.put("script.max_compilations_rate", "2048/1m");
|
||||
if (Version.fromString(version).getMajor() >= 6) {
|
||||
config.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b");
|
||||
}
|
||||
try {
|
||||
Files.write(
|
||||
getConfigFile().toPath(),
|
||||
config.entrySet().stream()
|
||||
.map(entry -> entry.getKey() + ": " + entry.getValue())
|
||||
.collect(Collectors.joining("\n"))
|
||||
.getBytes(StandardCharsets.UTF_8)
|
||||
);
|
||||
} catch (IOException e) {
|
||||
throw new TestClustersException("Could not write config file: " + getConfigFile(), e);
|
||||
}
|
||||
logger.info("Wrote config file: {} for {}", getConfigFile(), this);
|
||||
}
|
||||
|
||||
private void checkFrozen() {
|
||||
|
@@ -83,21 +362,121 @@ public class ElasticsearchNode {
|
|||
}
|
||||
}
|
||||
|
||||
private static String safeName(String name) {
|
||||
return name
|
||||
.replaceAll("^[^a-zA-Z0-9]+", "")
|
||||
.replaceAll("[^a-zA-Z0-9]+", "-");
|
||||
}
|
||||
|
||||
private File getHttpPortsFile() {
|
||||
return new File(getConfPathLogs(), "http.ports");
|
||||
}
|
||||
|
||||
private File getTransportPortFile() {
|
||||
return new File(getConfPathLogs(), "transport.ports");
|
||||
}
|
||||
|
||||
private List<String> getTransportPortInternal() {
|
||||
File transportPortFile = getTransportPortFile();
|
||||
try {
|
||||
return readPortsFile(getTransportPortFile());
|
||||
} catch (IOException e) {
|
||||
throw new TestClustersException(
|
||||
"Failed to read transport ports file: " + transportPortFile + " for " + this, e
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private List<String> getHttpPortInternal() {
|
||||
File httpPortsFile = getHttpPortsFile();
|
||||
try {
|
||||
return readPortsFile(getHttpPortsFile());
|
||||
} catch (IOException e) {
|
||||
throw new TestClustersException(
|
||||
"Failed to read http ports file: " + httpPortsFile + " for " + this, e
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private List<String> readPortsFile(File file) throws IOException {
|
||||
try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
|
||||
return reader.lines()
|
||||
.map(String::trim)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
}
|
||||
|
||||
private void waitForAllConditions() {
|
||||
requireNonNull(esProcess, "Can't wait for `" + this + "` as it was stopped.");
|
||||
long startedAt = System.currentTimeMillis();
|
||||
logger.info("Starting to wait for cluster to come up");
|
||||
waitConditions.forEach((description, predicate) -> {
|
||||
long thisConditionStartedAt = System.currentTimeMillis();
|
||||
boolean conditionMet = false;
|
||||
Throwable lastException = null;
|
||||
while (
|
||||
System.currentTimeMillis() - startedAt < MILLISECONDS.convert(NODE_UP_TIMEOUT, NODE_UP_TIMEOUT_UNIT)
|
||||
) {
|
||||
if (esProcess.isAlive() == false) {
|
||||
throw new TestClustersException(
|
||||
"process was found dead while waiting for " + description + ", " + this
|
||||
);
|
||||
}
|
||||
try {
|
||||
if (predicate.test(this)) {
|
||||
conditionMet = true;
|
||||
break;
|
||||
}
|
||||
} catch (TestClustersException e) {
|
||||
throw new TestClustersException(e);
|
||||
} catch (Exception e) {
|
||||
if (lastException == null) {
|
||||
lastException = e;
|
||||
} else {
|
||||
e.addSuppressed(lastException);
|
||||
lastException = e;
|
||||
}
|
||||
}
|
||||
try {
|
||||
Thread.sleep(500);
|
||||
}
|
||||
catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
if (conditionMet == false) {
|
||||
String message = "`" + this + "` failed to wait for " + description + " after " +
|
||||
NODE_UP_TIMEOUT + " " + NODE_UP_TIMEOUT_UNIT;
|
||||
if (lastException == null) {
|
||||
throw new TestClustersException(message);
|
||||
} else {
|
||||
throw new TestClustersException(message, lastException);
|
||||
}
|
||||
}
|
||||
logger.info(
|
||||
"{}: {} took {} seconds",
|
||||
this, description,
|
||||
SECONDS.convert(System.currentTimeMillis() - thisConditionStartedAt, MILLISECONDS)
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
ElasticsearchNode that = (ElasticsearchNode) o;
|
||||
return Objects.equals(name, that.name);
|
||||
return Objects.equals(name, that.name) &&
|
||||
Objects.equals(path, that.path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(name);
|
||||
return Objects.hash(name, path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ElasticsearchNode{name='" + name + "'}";
|
||||
return "node{" + path + ":" + name + "}";
|
||||
}
|
||||
}
|
||||
|
|
|
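For illustration (not part of this diff): ElasticsearchNode keeps its readiness checks in a LinkedHashMap of named predicates and polls them in insertion order. A minimal standalone sketch of that pattern with hypothetical names; for simplicity it gives each condition its own deadline, whereas the class above shares a single NODE_UP_TIMEOUT across all conditions and the same 500 ms poll interval.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Predicate;

class WaitConditions<T> {
    private final LinkedHashMap<String, Predicate<T>> conditions = new LinkedHashMap<>();

    void add(String description, Predicate<T> condition) {
        conditions.put(description, condition);
    }

    void awaitAll(T subject, long timeoutMillis) throws InterruptedException {
        for (Map.Entry<String, Predicate<T>> entry : conditions.entrySet()) {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            // Poll every 500 ms, like the node implementation above
            while (entry.getValue().test(subject) == false) {
                if (System.currentTimeMillis() > deadline) {
                    throw new IllegalStateException("timed out waiting for " + entry.getKey());
                }
                Thread.sleep(500);
            }
        }
    }
}
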
@@ -0,0 +1,33 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.gradle.testclusters;
|
||||
|
||||
class TestClustersException extends RuntimeException {
|
||||
TestClustersException(String message) {
|
||||
super(message);
|
||||
}
|
||||
|
||||
TestClustersException(String message, Throwable cause) {
|
||||
super(message, cause);
|
||||
}
|
||||
|
||||
TestClustersException(Throwable cause) {
|
||||
super(cause);
|
||||
}
|
||||
}
|
|
@@ -40,6 +40,9 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class TestClustersPlugin implements Plugin<Project> {
|
||||
|
@@ -48,14 +51,17 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
private static final String NODE_EXTENSION_NAME = "testClusters";
|
||||
static final String HELPER_CONFIGURATION_NAME = "testclusters";
|
||||
private static final String SYNC_ARTIFACTS_TASK_NAME = "syncTestClustersArtifacts";
|
||||
private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1;
|
||||
private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES;
|
||||
|
||||
private final Logger logger = Logging.getLogger(TestClustersPlugin.class);
|
||||
private static final Logger logger = Logging.getLogger(TestClustersPlugin.class);
|
||||
|
||||
// This is static because we need a single mapping across multi-project builds, as some of the listeners we use,
// like the task graph, are singletons across multi-project builds.
|
||||
private static final Map<Task, List<ElasticsearchNode>> usedClusters = new ConcurrentHashMap<>();
|
||||
private static final Map<ElasticsearchNode, Integer> claimsInventory = new ConcurrentHashMap<>();
|
||||
private static final Set<ElasticsearchNode> runningClusters = Collections.synchronizedSet(new HashSet<>());
|
||||
private static volatile ExecutorService executorService;
|
||||
|
||||
@Override
|
||||
public void apply(Project project) {
|
||||
|
@@ -106,6 +112,9 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
// After each task we determine if there are clusters that are no longer needed.
|
||||
configureStopClustersHook(project);
|
||||
|
||||
// configure hooks to make sure no test cluster processes survive the build
|
||||
configureCleanupHooks(project);
|
||||
|
||||
// Since we have everything modeled in the DSL, add all the required dependencies (e.g. the distribution) to the
|
||||
// configuration so the user doesn't have to repeat this.
|
||||
autoConfigureClusterDependencies(project, rootProject, container);
|
||||
|
@@ -117,8 +126,11 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
NamedDomainObjectContainer<ElasticsearchNode> container = project.container(
|
||||
ElasticsearchNode.class,
|
||||
name -> new ElasticsearchNode(
|
||||
project.getPath(),
|
||||
name,
|
||||
GradleServicesAdapter.getInstance(project)
|
||||
GradleServicesAdapter.getInstance(project),
|
||||
SyncTestClustersConfiguration.getTestClustersConfigurationExtractDir(project),
|
||||
new File(project.getBuildDir(), "testclusters")
|
||||
)
|
||||
);
|
||||
project.getExtensions().add(NODE_EXTENSION_NAME, container);
|
||||
|
@@ -137,14 +149,14 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
);
|
||||
}
|
||||
|
||||
private void createUseClusterTaskExtension(Project project) {
|
||||
private static void createUseClusterTaskExtension(Project project) {
|
||||
// register an extension for all current and future tasks, so that any task can declare that it wants to use a
|
||||
// specific cluster.
|
||||
project.getTasks().all((Task task) ->
|
||||
task.getExtensions().findByType(ExtraPropertiesExtension.class)
|
||||
.set(
|
||||
"useCluster",
|
||||
new Closure<Void>(this, task) {
|
||||
new Closure<Void>(project, task) {
|
||||
public void doCall(ElasticsearchNode node) {
|
||||
Object thisObject = this.getThisObject();
|
||||
if (thisObject instanceof Task == false) {
|
||||
|
@@ -160,7 +172,7 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
);
|
||||
}
|
||||
|
||||
private void configureClaimClustersHook(Project project) {
|
||||
private static void configureClaimClustersHook(Project project) {
|
||||
project.getGradle().getTaskGraph().whenReady(taskExecutionGraph ->
|
||||
taskExecutionGraph.getAllTasks()
|
||||
.forEach(task ->
|
||||
|
@@ -174,7 +186,7 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
);
|
||||
}
|
||||
|
||||
private void configureStartClustersHook(Project project) {
|
||||
private static void configureStartClustersHook(Project project) {
|
||||
project.getGradle().addListener(
|
||||
new TaskActionListener() {
|
||||
@Override
|
||||
|
@@ -196,7 +208,7 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
);
|
||||
}
|
||||
|
||||
private void configureStopClustersHook(Project project) {
|
||||
private static void configureStopClustersHook(Project project) {
|
||||
project.getGradle().addListener(
|
||||
new TaskExecutionListener() {
|
||||
@Override
|
||||
|
@@ -226,6 +238,7 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
.filter(entry -> runningClusters.contains(entry.getKey()))
|
||||
.map(Map.Entry::getKey)
|
||||
.collect(Collectors.toList());
|
||||
runningClusters.removeAll(stoppable);
|
||||
}
|
||||
stoppable.forEach(each -> each.stop(false));
|
||||
}
|
||||
|
@@ -251,7 +264,7 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
project.getExtensions().getByName(NODE_EXTENSION_NAME);
|
||||
}
|
||||
|
||||
private void autoConfigureClusterDependencies(
|
||||
private static void autoConfigureClusterDependencies(
|
||||
Project project,
|
||||
Project rootProject,
|
||||
NamedDomainObjectContainer<ElasticsearchNode> container
|
||||
|
@@ -272,6 +285,59 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
}));
|
||||
}
|
||||
|
||||
private static void configureCleanupHooks(Project project) {
|
||||
synchronized (runningClusters) {
|
||||
if (executorService == null || executorService.isTerminated()) {
|
||||
executorService = Executors.newSingleThreadExecutor();
|
||||
} else {
|
||||
throw new IllegalStateException("Trying to configure executor service twice");
|
||||
}
|
||||
}
|
||||
// When the Gradle daemon is used, it will interrupt all threads when the build concludes.
|
||||
executorService.submit(() -> {
|
||||
while (true) {
|
||||
try {
|
||||
Thread.sleep(Long.MAX_VALUE);
|
||||
} catch (InterruptedException interrupted) {
|
||||
shutDownAllClusters();
|
||||
Thread.currentThread().interrupt();
|
||||
return;
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
project.getGradle().buildFinished(buildResult -> {
|
||||
logger.info("Build finished");
|
||||
shutdownExecutorService();
|
||||
});
|
||||
// When the Daemon is not used, or runs into issues, rely on a shutdown hook
|
||||
// When the daemon is used but does not work correctly and eventually dies off (e.g. due to a non-interruptible
// thread in the build), the process will eventually be stopped when the daemon dies.
|
||||
Runtime.getRuntime().addShutdownHook(new Thread(TestClustersPlugin::shutDownAllClusters));
|
||||
}
|
||||
|
||||
private static void shutdownExecutorService() {
|
||||
executorService.shutdownNow();
|
||||
try {
|
||||
if (executorService.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIMEOUT_UNIT) == false) {
|
||||
throw new IllegalStateException(
|
||||
"Failed to shut down executor service after " +
|
||||
EXECUTOR_SHUTDOWN_TIMEOUT + " " + EXECUTOR_SHUTDOWN_TIMEOUT_UNIT
|
||||
);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
logger.info("Wait for testclusters shutdown interrupted", e);
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
|
||||
private static void shutDownAllClusters() {
|
||||
logger.info("Shutting down all test clusters", new RuntimeException());
|
||||
synchronized (runningClusters) {
|
||||
runningClusters.forEach(each -> each.stop(true));
|
||||
runningClusters.clear();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
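For illustration (not part of this diff): the plugin's usedClusters/claimsInventory/runningClusters maps implement simple reference counting, where each task claims the clusters it uses and a cluster becomes stoppable once its last claim is released after a task finishes. A minimal sketch of that bookkeeping with a hypothetical class name:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ClusterClaims<C> {
    private final Map<C, Integer> claims = new ConcurrentHashMap<>();

    void claim(C cluster) {
        claims.merge(cluster, 1, Integer::sum);
    }

    // Returns true when the last claim was released, i.e. the cluster can be stopped.
    boolean release(C cluster) {
        return claims.merge(cluster, -1, Integer::sum) <= 0;
    }
}
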
@@ -154,10 +154,11 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase {
|
|||
for (String each : text) {
|
||||
int i = output.indexOf(each);
|
||||
if (i == -1 ) {
|
||||
fail("Expected `" + text + "` to appear at most once, but it didn't at all.\n\nOutout is:\n"+ output);
|
||||
fail("Expected \n```" + each + "```\nto appear at most once, but it did not appear at all.\n\nOutput is:\n" + output
|
||||
);
|
||||
}
|
||||
if(output.indexOf(each) != output.lastIndexOf(each)) {
|
||||
fail("Expected `" + text + "` to appear at most once, but it did multiple times.\n\nOutout is:\n"+ output);
|
||||
fail("Expected `" + each + "` to appear at most once, but it appeared multiple times.\n\nOutput is:\n" + output);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
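For illustration (not part of this diff): the fixed helper above boils down to asserting that a substring occurs exactly once in the build output. A compact standalone equivalent with a hypothetical name:

class OutputAssertions {
    static void assertAppearsExactlyOnce(String output, String needle) {
        int first = output.indexOf(needle);
        if (first == -1) {
            throw new AssertionError("Expected `" + needle + "` exactly once, but it did not appear at all.\n\nOutput is:\n" + output);
        }
        if (output.indexOf(needle, first + 1) != -1) {
            throw new AssertionError("Expected `" + needle + "` exactly once, but it appeared multiple times.\n\nOutput is:\n" + output);
        }
    }
}
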
@@ -76,8 +76,8 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
|
|||
assertOutputContains(
|
||||
result.getOutput(),
|
||||
"> Task :user1",
|
||||
"Starting `ElasticsearchNode{name='myTestCluster'}`",
|
||||
"Stopping `ElasticsearchNode{name='myTestCluster'}`"
|
||||
"Starting `node{::myTestCluster}`",
|
||||
"Stopping `node{::myTestCluster}`"
|
||||
);
|
||||
}
|
||||
|
||||
|
@@ -88,7 +88,6 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
|
|||
.withPluginClasspath()
|
||||
.build();
|
||||
assertTaskSuccessful(result, ":user1", ":user2");
|
||||
|
||||
assertStartedAndStoppedOnce(result);
|
||||
}
|
||||
|
||||
|
@@ -98,7 +97,7 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
|
|||
assertStartedAndStoppedOnce(result);
|
||||
assertOutputContains(
|
||||
result.getOutput(),
|
||||
"Stopping `ElasticsearchNode{name='myTestCluster'}`, tailLogs: true",
|
||||
"Stopping `node{::myTestCluster}`, tailLogs: true",
|
||||
"Execution failed for task ':itAlwaysFails'."
|
||||
);
|
||||
}
|
||||
|
@@ -110,7 +109,7 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
|
|||
assertStartedAndStoppedOnce(result);
|
||||
assertOutputContains(
|
||||
result.getOutput(),
|
||||
"Stopping `ElasticsearchNode{name='myTestCluster'}`, tailLogs: true",
|
||||
"Stopping `node{::myTestCluster}`, tailLogs: true",
|
||||
"Execution failed for task ':itAlwaysFails'."
|
||||
);
|
||||
}
|
||||
|
@@ -146,8 +145,8 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
|
|||
private void assertStartedAndStoppedOnce(BuildResult result) {
|
||||
assertOutputOnlyOnce(
|
||||
result.getOutput(),
|
||||
"Starting `ElasticsearchNode{name='myTestCluster'}`",
|
||||
"Stopping `ElasticsearchNode{name='myTestCluster'}`"
|
||||
"Starting `node{::myTestCluster}`",
|
||||
"Stopping `node{::myTestCluster}`"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -18,14 +18,14 @@ repositories {
|
|||
task user1 {
|
||||
useCluster testClusters.myTestCluster
|
||||
doLast {
|
||||
println "user1 executing"
|
||||
println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
|
||||
}
|
||||
}
|
||||
|
||||
task user2 {
|
||||
useCluster testClusters.myTestCluster
|
||||
doLast {
|
||||
println "user2 executing"
|
||||
println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -10,12 +10,12 @@ testClusters {
|
|||
task user1 {
|
||||
useCluster testClusters.myTestCluster
|
||||
doFirst {
|
||||
println "$path"
|
||||
println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
|
||||
}
|
||||
}
|
||||
task user2 {
|
||||
useCluster testClusters.myTestCluster
|
||||
doFirst {
|
||||
println "$path"
|
||||
println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -12,13 +12,13 @@ testClusters {
|
|||
task user1 {
|
||||
useCluster testClusters.myTestCluster
|
||||
doFirst {
|
||||
println "$path"
|
||||
println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
|
||||
}
|
||||
}
|
||||
|
||||
task user2 {
|
||||
useCluster testClusters.myTestCluster
|
||||
doFirst {
|
||||
println "$path"
|
||||
println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -20,13 +20,13 @@ testClusters {
|
|||
task user1 {
|
||||
useCluster testClusters.myTestCluster
|
||||
doFirst {
|
||||
println "$path"
|
||||
println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
|
||||
}
|
||||
}
|
||||
|
||||
task user2 {
|
||||
useCluster testClusters.myTestCluster
|
||||
doFirst {
|
||||
println "$path"
|
||||
println "$path: Cluster running @ ${testClusters.myTestCluster.httpSocketURI}"
|
||||
}
|
||||
}
|
|
@@ -21,6 +21,8 @@ package org.elasticsearch.client;
|
|||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
|
||||
import org.elasticsearch.client.ccr.PauseFollowRequest;
|
||||
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.PutFollowRequest;
|
||||
|
@@ -291,7 +293,7 @@ public final class CcrClient {
|
|||
}
|
||||
|
||||
/**
|
||||
* Deletes an auto follow pattern.
|
||||
* Asynchronously deletes an auto follow pattern.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-delete-auto-follow-pattern.html">
|
||||
* the docs</a> for more.
|
||||
|
@@ -313,4 +315,49 @@ public final class CcrClient {
|
|||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets an auto follow pattern.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html">
|
||||
* the docs</a> for more.
|
||||
*
|
||||
* @param request the request
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @return the response
|
||||
* @throws IOException in case there is a problem sending the request or parsing back the response
|
||||
*/
|
||||
public GetAutoFollowPatternResponse getAutoFollowPattern(GetAutoFollowPatternRequest request,
|
||||
RequestOptions options) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity(
|
||||
request,
|
||||
CcrRequestConverters::getAutoFollowPattern,
|
||||
options,
|
||||
GetAutoFollowPatternResponse::fromXContent,
|
||||
Collections.emptySet()
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously gets an auto follow pattern.
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-get-auto-follow-pattern.html">
|
||||
* the docs</a> for more.
|
||||
*
|
||||
* @param request the request
|
||||
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
|
||||
* @param listener the listener to be notified upon request completion
|
||||
*/
|
||||
public void getAutoFollowPatternAsync(GetAutoFollowPatternRequest request,
|
||||
RequestOptions options,
|
||||
ActionListener<GetAutoFollowPatternResponse> listener) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity(
|
||||
request,
|
||||
CcrRequestConverters::getAutoFollowPattern,
|
||||
options,
|
||||
GetAutoFollowPatternResponse::fromXContent,
|
||||
listener,
|
||||
Collections.emptySet()
|
||||
);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
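For illustration (not part of this diff): a minimal synchronous usage sketch of the new get-auto-follow-pattern client method, mirroring the documentation test added later in this commit. The pattern name "my_pattern" and the wrapper class are illustrative.

import java.io.IOException;

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;

class GetAutoFollowPatternExample {
    static void printPatterns(RestHighLevelClient client) throws IOException {
        // Use the no-arg constructor instead to fetch all auto follow patterns
        GetAutoFollowPatternRequest request = new GetAutoFollowPatternRequest("my_pattern");
        GetAutoFollowPatternResponse response =
            client.ccr().getAutoFollowPattern(request, RequestOptions.DEFAULT);
        response.getPatterns().forEach((name, pattern) ->
            System.out.println(name + " follows " + pattern.getLeaderIndexPatterns()
                + " on " + pattern.getRemoteCluster()));
    }
}
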
@@ -20,9 +20,11 @@
|
|||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.client.methods.HttpDelete;
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.PauseFollowRequest;
|
||||
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.PutFollowRequest;
|
||||
|
@@ -90,4 +92,12 @@ final class CcrRequestConverters {
|
|||
return new Request(HttpDelete.METHOD_NAME, endpoint);
|
||||
}
|
||||
|
||||
static Request getAutoFollowPattern(GetAutoFollowPatternRequest getAutoFollowPatternRequest) {
|
||||
String endpoint = new RequestConverters.EndpointBuilder()
|
||||
.addPathPartAsIs("_ccr", "auto_follow")
|
||||
.addPathPart(getAutoFollowPatternRequest.getName())
|
||||
.build();
|
||||
return new Request(HttpGet.METHOD_NAME, endpoint);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,52 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.ccr;
|
||||
|
||||
import org.elasticsearch.client.Validatable;
|
||||
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Request class for get auto follow pattern api.
|
||||
*/
|
||||
public final class GetAutoFollowPatternRequest implements Validatable {
|
||||
|
||||
private final String name;
|
||||
|
||||
/**
|
||||
* Get all auto follow patterns
|
||||
*/
|
||||
public GetAutoFollowPatternRequest() {
|
||||
this.name = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get auto follow pattern with the specified name
|
||||
*
|
||||
* @param name The name of the auto follow pattern to get
|
||||
*/
|
||||
public GetAutoFollowPatternRequest(String name) {
|
||||
this.name = Objects.requireNonNull(name);
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,159 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.ccr;
|
||||
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentParser.Token;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
public final class GetAutoFollowPatternResponse {
|
||||
|
||||
public static GetAutoFollowPatternResponse fromXContent(final XContentParser parser) throws IOException {
|
||||
final Map<String, Pattern> patterns = new HashMap<>();
|
||||
for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
|
||||
if (token == Token.FIELD_NAME) {
|
||||
final String name = parser.currentName();
|
||||
final Pattern pattern = Pattern.PARSER.parse(parser, null);
|
||||
patterns.put(name, pattern);
|
||||
}
|
||||
}
|
||||
return new GetAutoFollowPatternResponse(patterns);
|
||||
}
|
||||
|
||||
private final Map<String, Pattern> patterns;
|
||||
|
||||
GetAutoFollowPatternResponse(Map<String, Pattern> patterns) {
|
||||
this.patterns = Collections.unmodifiableMap(patterns);
|
||||
}
|
||||
|
||||
public Map<String, Pattern> getPatterns() {
|
||||
return patterns;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
GetAutoFollowPatternResponse that = (GetAutoFollowPatternResponse) o;
|
||||
return Objects.equals(patterns, that.patterns);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(patterns);
|
||||
}
|
||||
|
||||
public static class Pattern extends FollowConfig {
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private static final ConstructingObjectParser<Pattern, Void> PARSER = new ConstructingObjectParser<>(
|
||||
"pattern", args -> new Pattern((String) args[0], (List<String>) args[1], (String) args[2]));
|
||||
|
||||
static {
|
||||
PARSER.declareString(ConstructingObjectParser.constructorArg(), PutFollowRequest.REMOTE_CLUSTER_FIELD);
|
||||
PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD);
|
||||
PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD);
|
||||
PARSER.declareInt(Pattern::setMaxReadRequestOperationCount, FollowConfig.MAX_READ_REQUEST_OPERATION_COUNT);
|
||||
PARSER.declareField(
|
||||
Pattern::setMaxReadRequestSize,
|
||||
(p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_READ_REQUEST_SIZE.getPreferredName()),
|
||||
PutFollowRequest.MAX_READ_REQUEST_SIZE,
|
||||
ObjectParser.ValueType.STRING);
|
||||
PARSER.declareInt(Pattern::setMaxOutstandingReadRequests, FollowConfig.MAX_OUTSTANDING_READ_REQUESTS);
|
||||
PARSER.declareInt(Pattern::setMaxWriteRequestOperationCount, FollowConfig.MAX_WRITE_REQUEST_OPERATION_COUNT);
|
||||
PARSER.declareField(
|
||||
Pattern::setMaxWriteRequestSize,
|
||||
(p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_REQUEST_SIZE.getPreferredName()),
|
||||
PutFollowRequest.MAX_WRITE_REQUEST_SIZE,
|
||||
ObjectParser.ValueType.STRING);
|
||||
PARSER.declareInt(Pattern::setMaxOutstandingWriteRequests, FollowConfig.MAX_OUTSTANDING_WRITE_REQUESTS);
|
||||
PARSER.declareInt(Pattern::setMaxWriteBufferCount, FollowConfig.MAX_WRITE_BUFFER_COUNT);
|
||||
PARSER.declareField(
|
||||
Pattern::setMaxWriteBufferSize,
|
||||
(p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_BUFFER_SIZE.getPreferredName()),
|
||||
PutFollowRequest.MAX_WRITE_BUFFER_SIZE,
|
||||
ObjectParser.ValueType.STRING);
|
||||
PARSER.declareField(
|
||||
Pattern::setMaxRetryDelay,
|
||||
(p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.MAX_RETRY_DELAY_FIELD.getPreferredName()),
|
||||
PutFollowRequest.MAX_RETRY_DELAY_FIELD,
|
||||
ObjectParser.ValueType.STRING);
|
||||
PARSER.declareField(
|
||||
Pattern::setReadPollTimeout,
|
||||
(p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.READ_POLL_TIMEOUT.getPreferredName()),
|
||||
PutFollowRequest.READ_POLL_TIMEOUT,
|
||||
ObjectParser.ValueType.STRING);
|
||||
}
|
||||
|
||||
private final String remoteCluster;
|
||||
private final List<String> leaderIndexPatterns;
|
||||
private final String followIndexNamePattern;
|
||||
|
||||
Pattern(String remoteCluster, List<String> leaderIndexPatterns, String followIndexNamePattern) {
|
||||
this.remoteCluster = remoteCluster;
|
||||
this.leaderIndexPatterns = leaderIndexPatterns;
|
||||
this.followIndexNamePattern = followIndexNamePattern;
|
||||
}
|
||||
|
||||
public String getRemoteCluster() {
|
||||
return remoteCluster;
|
||||
}
|
||||
|
||||
public List<String> getLeaderIndexPatterns() {
|
||||
return leaderIndexPatterns;
|
||||
}
|
||||
|
||||
public String getFollowIndexNamePattern() {
|
||||
return followIndexNamePattern;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
if (!super.equals(o)) return false;
|
||||
Pattern pattern = (Pattern) o;
|
||||
return Objects.equals(remoteCluster, pattern.remoteCluster) &&
|
||||
Objects.equals(leaderIndexPatterns, pattern.leaderIndexPatterns) &&
|
||||
Objects.equals(followIndexNamePattern, pattern.followIndexNamePattern);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(
|
||||
super.hashCode(),
|
||||
remoteCluster,
|
||||
leaderIndexPatterns,
|
||||
followIndexNamePattern
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@@ -30,6 +30,8 @@ import org.elasticsearch.action.search.SearchRequest;
|
|||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
|
||||
import org.elasticsearch.client.ccr.PauseFollowRequest;
|
||||
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.PutFollowRequest;
|
||||
|
@@ -37,6 +39,7 @@ import org.elasticsearch.client.ccr.PutFollowResponse;
|
|||
import org.elasticsearch.client.ccr.ResumeFollowRequest;
|
||||
import org.elasticsearch.client.ccr.UnfollowRequest;
|
||||
import org.elasticsearch.client.core.AcknowledgedResponse;
|
||||
import org.elasticsearch.common.xcontent.ObjectPath;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
|
@@ -48,11 +51,12 @@ import java.util.Map;
|
|||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
|
||||
public class CCRIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@Before
|
||||
public void setupRemoteClusterConfig() throws IOException {
|
||||
public void setupRemoteClusterConfig() throws Exception {
|
||||
// Configure local cluster as remote cluster:
|
||||
// TODO: replace with nodes info highlevel rest client code when it is available:
|
||||
final Request request = new Request("GET", "/_nodes");
|
||||
|
@@ -66,6 +70,14 @@ public class CCRIT extends ESRestHighLevelClientTestCase {
|
|||
ClusterUpdateSettingsResponse updateSettingsResponse =
|
||||
highLevelClient().cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT);
|
||||
assertThat(updateSettingsResponse.isAcknowledged(), is(true));
|
||||
|
||||
assertBusy(() -> {
|
||||
Map<?, ?> localConnection = (Map<?, ?>) toMap(client()
|
||||
.performRequest(new Request("GET", "/_remote/info")))
|
||||
.get("local");
|
||||
assertThat(localConnection, notNullValue());
|
||||
assertThat(localConnection.get("connected"), is(true));
|
||||
});
|
||||
}
|
||||
|
||||
public void testIndexFollowing() throws Exception {
|
||||
|
@@ -129,7 +141,6 @@ public class CCRIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(unfollowResponse.isAcknowledged(), is(true));
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35937")
|
||||
public void testAutoFollowing() throws Exception {
|
||||
CcrClient ccrClient = highLevelClient().ccr();
|
||||
PutAutoFollowPatternRequest putAutoFollowPatternRequest =
|
||||
|
@@ -146,8 +157,26 @@ public class CCRIT extends ESRestHighLevelClientTestCase {
|
|||
|
||||
assertBusy(() -> {
|
||||
assertThat(indexExists("copy-logs-20200101"), is(true));
|
||||
// TODO: replace with HLRC follow stats when available:
|
||||
Map<String, Object> rsp = toMap(client().performRequest(new Request("GET", "/copy-logs-20200101/_ccr/stats")));
|
||||
String index = null;
|
||||
try {
|
||||
index = ObjectPath.eval("indices.0.index", rsp);
|
||||
} catch (Exception e) { }
|
||||
assertThat(index, equalTo("copy-logs-20200101"));
|
||||
});
|
||||
|
||||
GetAutoFollowPatternRequest getAutoFollowPatternRequest =
|
||||
randomBoolean() ? new GetAutoFollowPatternRequest("pattern1") : new GetAutoFollowPatternRequest();
|
||||
GetAutoFollowPatternResponse getAutoFollowPatternResponse =
|
||||
execute(getAutoFollowPatternRequest, ccrClient::getAutoFollowPattern, ccrClient::getAutoFollowPatternAsync);
|
||||
assertThat(getAutoFollowPatternResponse.getPatterns().size(), equalTo(1));
|
||||
GetAutoFollowPatternResponse.Pattern pattern = getAutoFollowPatternResponse.getPatterns().get("pattern1");
|
||||
assertThat(pattern, notNullValue());
|
||||
assertThat(pattern.getRemoteCluster(), equalTo(putAutoFollowPatternRequest.getRemoteCluster()));
|
||||
assertThat(pattern.getLeaderIndexPatterns(), equalTo(putAutoFollowPatternRequest.getLeaderIndexPatterns()));
|
||||
assertThat(pattern.getFollowIndexNamePattern(), equalTo(putAutoFollowPatternRequest.getFollowIndexNamePattern()));
|
||||
|
||||
// Cleanup:
|
||||
final DeleteAutoFollowPatternRequest deleteAutoFollowPatternRequest = new DeleteAutoFollowPatternRequest("pattern1");
|
||||
AcknowledgedResponse deleteAutoFollowPatternResponse =
|
||||
|
|
|
@@ -0,0 +1,107 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.ccr;
|
||||
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD;
|
||||
import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD;
|
||||
import static org.elasticsearch.client.ccr.PutFollowRequest.REMOTE_CLUSTER_FIELD;
|
||||
import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
|
||||
|
||||
public class GetAutoFollowPatternResponseTests extends ESTestCase {
|
||||
|
||||
public void testFromXContent() throws IOException {
|
||||
xContentTester(this::createParser,
|
||||
this::createTestInstance,
|
||||
GetAutoFollowPatternResponseTests::toXContent,
|
||||
GetAutoFollowPatternResponse::fromXContent)
|
||||
.supportsUnknownFields(false)
|
||||
.test();
|
||||
}
|
||||
|
||||
private GetAutoFollowPatternResponse createTestInstance() {
|
||||
int numPatterns = randomIntBetween(0, 16);
|
||||
Map<String, GetAutoFollowPatternResponse.Pattern> patterns = new HashMap<>(numPatterns);
|
||||
for (int i = 0; i < numPatterns; i++) {
|
||||
GetAutoFollowPatternResponse.Pattern pattern = new GetAutoFollowPatternResponse.Pattern(
|
||||
randomAlphaOfLength(4), Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4));
|
||||
if (randomBoolean()) {
|
||||
pattern.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
pattern.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
pattern.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
pattern.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong()));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
pattern.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
pattern.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong()));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
pattern.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
pattern.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong()));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
pattern.setMaxRetryDelay(new TimeValue(randomNonNegativeLong()));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
pattern.setReadPollTimeout(new TimeValue(randomNonNegativeLong()));
|
||||
}
|
||||
patterns.put(randomAlphaOfLength(4), pattern);
|
||||
}
|
||||
return new GetAutoFollowPatternResponse(patterns);
|
||||
}
|
||||
|
||||
public static void toXContent(GetAutoFollowPatternResponse response, XContentBuilder builder) throws IOException {
|
||||
builder.startObject();
|
||||
{
|
||||
for (Map.Entry<String, GetAutoFollowPatternResponse.Pattern> entry : response.getPatterns().entrySet()) {
|
||||
builder.startObject(entry.getKey());
|
||||
GetAutoFollowPatternResponse.Pattern pattern = entry.getValue();
|
||||
builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), pattern.getRemoteCluster());
|
||||
builder.field(LEADER_PATTERNS_FIELD.getPreferredName(), pattern.getLeaderIndexPatterns());
|
||||
if (pattern.getFollowIndexNamePattern() != null) {
|
||||
builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), pattern.getFollowIndexNamePattern());
|
||||
}
|
||||
entry.getValue().toXContentFragment(builder, ToXContent.EMPTY_PARAMS);
|
||||
builder.endObject();
|
||||
}
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
}
|
|
@@ -34,6 +34,9 @@ import org.elasticsearch.client.RequestOptions;
|
|||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
|
||||
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse.Pattern;
|
||||
import org.elasticsearch.client.ccr.PauseFollowRequest;
|
||||
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
|
||||
import org.elasticsearch.client.ccr.PutFollowRequest;
|
||||
|
@ -501,6 +504,70 @@ public class CCRDocumentationIT extends ESRestHighLevelClientTestCase {
|
|||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testGetAutoFollowPattern() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
// Put auto follow pattern, so that we can get it:
|
||||
{
|
||||
final PutAutoFollowPatternRequest putRequest =
|
||||
new PutAutoFollowPatternRequest("my_pattern", "local", Collections.singletonList("logs-*"));
|
||||
AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
|
||||
assertThat(putResponse.isAcknowledged(), is(true));
|
||||
}
|
||||
|
||||
// tag::ccr-get-auto-follow-pattern-request
|
||||
GetAutoFollowPatternRequest request =
|
||||
new GetAutoFollowPatternRequest("my_pattern"); // <1>
|
||||
// end::ccr-get-auto-follow-pattern-request
|
||||
|
||||
// tag::ccr-get-auto-follow-pattern-execute
|
||||
GetAutoFollowPatternResponse response = client.ccr()
|
||||
.getAutoFollowPattern(request, RequestOptions.DEFAULT);
|
||||
// end::ccr-get-auto-follow-pattern-execute
|
||||
|
||||
// tag::ccr-get-auto-follow-pattern-response
|
||||
Map<String, Pattern> patterns = response.getPatterns();
|
||||
Pattern pattern = patterns.get("my_pattern"); // <1>
|
||||
pattern.getLeaderIndexPatterns();
|
||||
// end::ccr-get-auto-follow-pattern-response
|
||||
|
||||
// tag::ccr-get-auto-follow-pattern-execute-listener
|
||||
ActionListener<GetAutoFollowPatternResponse> listener =
|
||||
new ActionListener<GetAutoFollowPatternResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetAutoFollowPatternResponse
|
||||
response) { // <1>
|
||||
Map<String, Pattern> patterns = response.getPatterns();
|
||||
Pattern pattern = patterns.get("my_pattern");
|
||||
pattern.getLeaderIndexPatterns();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::ccr-get-auto-follow-pattern-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::ccr-get-auto-follow-pattern-execute-async
|
||||
client.ccr().getAutoFollowPatternAsync(request,
|
||||
RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::ccr-get-auto-follow-pattern-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
|
||||
// Cleanup:
|
||||
{
|
||||
DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
|
||||
AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
|
||||
assertThat(deleteResponse.isAcknowledged(), is(true));
|
||||
}
|
||||
}
|
||||
|
||||
static Map<String, Object> toMap(Response response) throws IOException {
|
||||
return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false);
|
||||
}
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
--
:api: ccr-get-auto-follow-pattern
:request: GetAutoFollowPatternRequest
:response: GetAutoFollowPatternResponse
--

[id="{upid}-{api}"]
=== Get Auto Follow Pattern API

[id="{upid}-{api}-request"]
==== Request

The Get Auto Follow Pattern API allows you to get a specified auto follow pattern
or all auto follow patterns.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the auto follow pattern to get.
Use the default constructor to get all auto follow patterns.
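
For orientation, a minimal sketch of the two request flavours described above; only the
named-pattern constructor appears in the tagged test, so the no-argument form is an
assumption taken from the callout (the class lives in `org.elasticsearch.client.ccr`):

[source,java]
--------------------------------------------------
// Request a single, named auto follow pattern.
GetAutoFollowPatternRequest singlePattern =
        new GetAutoFollowPatternRequest("my_pattern");

// Request every auto follow pattern; the no-argument constructor is
// assumed here from the callout above.
GetAutoFollowPatternRequest allPatterns = new GetAutoFollowPatternRequest();
--------------------------------------------------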
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ includes the requested auto follow pattern or
|
||||
all auto follow patterns if default constructor or request class was used.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> Get the requested pattern from the list of returned patterns
|
||||
|
||||
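
A minimal sketch of walking every returned pattern, using only accessors exercised by the
response tests in this change (`getPatterns()`, `getRemoteCluster()`,
`getLeaderIndexPatterns()`); the printing is purely illustrative:

[source,java]
--------------------------------------------------
// Pattern is imported as GetAutoFollowPatternResponse.Pattern.
Map<String, Pattern> patterns = response.getPatterns();
for (Map.Entry<String, Pattern> entry : patterns.entrySet()) {
    Pattern pattern = entry.getValue();
    System.out.println(entry.getKey() + " follows " + pattern.getLeaderIndexPatterns()
        + " on remote cluster [" + pattern.getRemoteCluster() + "]");
}
--------------------------------------------------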
include::../execution.asciidoc[]
|
|
@ -472,6 +472,7 @@ The Java High Level REST Client supports the following CCR APIs:
|
|||
* <<{upid}-ccr-unfollow>>
|
||||
* <<{upid}-ccr-put-auto-follow-pattern>>
|
||||
* <<{upid}-ccr-delete-auto-follow-pattern>>
|
||||
* <<{upid}-ccr-get-auto-follow-pattern>>
|
||||
|
||||
include::ccr/put_follow.asciidoc[]
|
||||
include::ccr/pause_follow.asciidoc[]
|
||||
|
@ -479,6 +480,7 @@ include::ccr/resume_follow.asciidoc[]
|
|||
include::ccr/unfollow.asciidoc[]
|
||||
include::ccr/put_auto_follow_pattern.asciidoc[]
|
||||
include::ccr/delete_auto_follow_pattern.asciidoc[]
|
||||
include::ccr/get_auto_follow_pattern.asciidoc[]
|
||||
|
||||
== Index Lifecycle Management APIs
|
||||
|
||||
|
|
|
@ -11,8 +11,10 @@ score to documents returned from a query.
|
|||
User-defined parameters passed in as part of the query.
|
||||
|
||||
`doc` (`Map`, read-only)::
|
||||
Contains the fields of the current document where each field is a
|
||||
`List` of values.
|
||||
Contains the fields of the current document. For single-valued fields,
|
||||
the value can be accessed via `doc['fieldname'].value`. For multi-valued
|
||||
fields, this returns the first value; other values can be accessed
|
||||
via `doc['fieldname'].get(index)`
|
||||
|
||||
`_score` (`double` read-only)::
|
||||
The similarity score of the current document.
|
||||
|
@ -24,4 +26,33 @@ score to documents returned from a query.
|
|||
|
||||
*API*
|
||||
|
||||
The standard <<painless-api-reference, Painless API>> is available.
|
||||
The standard <<painless-api-reference, Painless API>> is available.
|
||||
|
||||
*Example*
|
||||
|
||||
To run this example, first follow the steps in
|
||||
<<painless-context-examples, context examples>>.
|
||||
|
||||
The following query finds all unsold seats, with lower 'row' values
|
||||
scored higher.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /seats/_search
|
||||
{
|
||||
"query": {
|
||||
"function_score": {
|
||||
"query": {
|
||||
"match": { "sold": "false" }
|
||||
},
|
||||
"script_score" : {
|
||||
"script" : {
|
||||
"source": "1.0 / doc['row'].value"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[setup:seats]
|
|
@ -15,6 +15,9 @@ documents in a query.
|
|||
`params` (`Map`, read-only)::
|
||||
User-defined parameters passed in at query-time.
|
||||
|
||||
`weight` (`float`, read-only)::
|
||||
The weight as calculated by a <<painless-weight-context,weight script>>
|
||||
|
||||
`query.boost` (`float`, read-only)::
|
||||
The boost value if provided by the query. If this is not provided the
|
||||
value is `1.0f`.
|
||||
|
@ -37,12 +40,23 @@ documents in a query.
|
|||
The total occurrences of the current term in the index.
|
||||
|
||||
`doc.length` (`long`, read-only)::
|
||||
The number of tokens the current document has in the current field.
|
||||
The number of tokens the current document has in the current field. This
|
||||
is decoded from the stored {ref}/norms.html[norms] and may be approximate for
|
||||
long fields.
|
||||
|
||||
`doc.freq` (`long`, read-only)::
|
||||
The number of occurrences of the current term in the current
|
||||
document for the current field.
|
||||
|
||||
Note that the `query`, `field`, and `term` variables are also available to the
|
||||
<<painless-weight-context,weight context>>. They are more efficiently used
|
||||
there, as they are constant for all documents.
|
||||
|
||||
For queries that contain multiple terms, the script is called once for each
|
||||
term with that term's calculated weight, and the results are summed. Note that some
|
||||
terms might have a `doc.freq` value of `0` on a document, for example if a query
|
||||
uses synonyms.
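
To make the pieces concrete, here is a rough sketch of registering a scripted similarity
whose per-document script uses only variables from this context (`query.boost`,
`doc.freq`, `doc.length`). The index-settings keys and the high-level-client plumbing are
assumptions for illustration, not part of this change, and a field mapping still has to
reference the similarity by name:

[source,java]
--------------------------------------------------
Settings settings = Settings.builder()
    .put("index.similarity.scripted_tf.type", "scripted")
    .put("index.similarity.scripted_tf.script.source",
        // uses only similarity-context variables described above
        "double tf = Math.sqrt(doc.freq); "
            + "double norm = 1.0 / Math.sqrt(doc.length); "
            + "return query.boost * tf * norm;")
    .build();

CreateIndexRequest createIndex = new CreateIndexRequest("my-index").settings(settings);
client.indices().create(createIndex, RequestOptions.DEFAULT);
--------------------------------------------------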
|
||||
|
||||
*Return*
|
||||
|
||||
`double`::
|
||||
|
|
|
@ -10,8 +10,10 @@ Use a Painless script to
|
|||
User-defined parameters passed in as part of the query.
|
||||
|
||||
`doc` (`Map`, read-only)::
|
||||
Contains the fields of the current document where each field is a
|
||||
`List` of values.
|
||||
Contains the fields of the current document. For single-valued fields,
|
||||
the value can be accessed via `doc['fieldname'].value`. For multi-valued
|
||||
fields, this returns the first value; other values can be accessed
|
||||
via `doc['fieldname'].get(index)`
|
||||
|
||||
`_score` (`double` read-only)::
|
||||
The similarity score of the current document.
|
||||
|
@ -23,4 +25,37 @@ Use a Painless script to
|
|||
|
||||
*API*
|
||||
|
||||
The standard <<painless-api-reference, Painless API>> is available.
|
||||
The standard <<painless-api-reference, Painless API>> is available.
|
||||
|
||||
*Example*
|
||||
|
||||
To run this example, first follow the steps in
|
||||
<<painless-context-examples, context examples>>.
|
||||
|
||||
To sort results by the length of the `theatre` field, submit the following query:
|
||||
|
||||
[source,js]
|
||||
----
|
||||
GET /_search
|
||||
{
|
||||
"query" : {
|
||||
"term" : { "sold" : "true" }
|
||||
},
|
||||
"sort" : {
|
||||
"_script" : {
|
||||
"type" : "number",
|
||||
"script" : {
|
||||
"lang": "painless",
|
||||
"source": "doc['theatre'].value.length() * params.factor",
|
||||
"params" : {
|
||||
"factor" : 1.1
|
||||
}
|
||||
},
|
||||
"order" : "asc"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
----
|
||||
// CONSOLE
|
||||
// TEST[setup:seats]
|
|
@ -3,8 +3,11 @@
|
|||
|
||||
Use a Painless script to create a
|
||||
{ref}/index-modules-similarity.html[weight] for use in a
|
||||
<<painless-similarity-context, similarity script>>. Weight is used to prevent
|
||||
recalculation of constants that remain the same across documents.
|
||||
<<painless-similarity-context, similarity script>>. The weight makes up the
|
||||
part of the similarity calculation that is independent of the document being
|
||||
scored, and so can be built up front and cached.
|
||||
|
||||
Queries that contain multiple terms calculate a separate weight for each term.
|
||||
|
||||
*Variables*
|
||||
|
||||
|
|
|
@ -27,9 +27,6 @@ or by adding the requested fields in the request body (see
|
|||
example below). Fields can also be specified with wildcards
|
||||
in similar way to the <<query-dsl-multi-match-query,multi match query>>
|
||||
|
||||
[WARNING]
|
||||
Note that the usage of `/_termvector` is deprecated in 2.0, and replaced by `/_termvectors`.
|
||||
|
||||
[float]
|
||||
=== Return values
|
||||
|
||||
|
|
|
@ -46,7 +46,7 @@ PUT my_index/_doc/1?refresh
|
|||
|
||||
PUT my_index/_doc/2?refresh
|
||||
{
|
||||
"text": "This is a another question",
|
||||
"text": "This is another question",
|
||||
"my_join_field": {
|
||||
"name": "question"
|
||||
}
|
||||
|
@ -417,7 +417,7 @@ The mapping above represents the following tree:
|
|||
|
|
||||
vote
|
||||
|
||||
Indexing a grand child document requires a `routing` value equals
|
||||
Indexing a grandchild document requires a `routing` value equal
|
||||
to the grand-parent (the greater parent of the lineage):
|
||||
|
||||
|
||||
|
@ -436,4 +436,4 @@ PUT my_index/_doc/3?routing=1&refresh <1>
|
|||
// TEST[continued]
|
||||
|
||||
<1> This child document must be on the same shard as its grand-parent and parent
|
||||
<2> The parent id of this document (must points to an `answer` document)
|
||||
<2> The parent id of this document (must points to an `answer` document)
|
||||
|
|
|
@ -119,3 +119,10 @@ while now an exception is thrown.
|
|||
|
||||
The deprecated graph endpoints (those with `/_graph/_explore`) have been
|
||||
removed.
|
||||
|
||||
|
||||
[float]
|
||||
==== Deprecated `_termvector` endpoint removed
|
||||
|
||||
The `_termvector` endpoint was deprecated in 2.0 and has now been removed.
|
||||
The endpoint `_termvectors` (plural) should be used instead.
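
A hedged sketch of hitting the plural endpoint from the low-level Java REST client; the
typeless URL shape is assumed from the typeless-API work elsewhere in this change, and the
index name, document id and `lowLevelClient` variable are placeholders:

[source,java]
--------------------------------------------------
// GET {index}/_termvectors/{id} replaces the removed singular _termvector form.
Request request = new Request("GET", "/my-index/_termvectors/1");
Response response = lowLevelClient.performRequest(request);
--------------------------------------------------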
|
||||
|
|
|
@ -32,4 +32,10 @@ was moved to `org.elasticsearch.search.aggregations.PipelineAggregationBuilders`
|
|||
==== `Retry.withBackoff` methods with `Settings` removed
|
||||
|
||||
The variants of `Retry.withBackoff` that included `Settings` have been removed
|
||||
because `Settings` is no longer needed.
|
||||
because `Settings` is no longer needed.
|
||||
|
||||
[float]
|
||||
==== Deprecated method `Client#termVector` removed
|
||||
|
||||
The client method `termVector`, deprecated in 2.0, has been removed. The method
|
||||
`termVectors` (plural) should be used instead.
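
As a hedged sketch of the replacement calls on the `Client` interface (the index, type and
id values are placeholders, and `client` is any `Client` instance):

[source,java]
--------------------------------------------------
// Plural request/response API that replaces the removed termVector methods.
TermVectorsRequest request = new TermVectorsRequest("my-index", "_doc", "1");
TermVectorsResponse response = client.termVectors(request).actionGet();

// Builder variant that this change keeps:
client.prepareTermVectors("my-index", "_doc", "1").get();
--------------------------------------------------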
|
|
@ -125,6 +125,11 @@ TIP: Keeping older segments alive means that more file handles are needed.
|
|||
Ensure that you have configured your nodes to have ample free file handles.
|
||||
See <<file-descriptors>>.
|
||||
|
||||
NOTE: To protect against issues caused by having too many scrolls open, the
user is not allowed to open scrolls past a certain limit. By default, the
maximum number of open scrolls is 500. This limit can be updated with the
`search.max_open_scroll_context` cluster setting.
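
For completeness, a hedged sketch of raising that limit through the cluster-settings API of
the Java high-level REST client; the setting name comes from the note above, while the
value and the choice of a persistent setting are illustrative only:

[source,java]
--------------------------------------------------
ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
request.persistentSettings(Settings.builder()
    .put("search.max_open_scroll_context", 1000)
    .build());
client.cluster().putSettings(request, RequestOptions.DEFAULT);
--------------------------------------------------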
|
||||
|
||||
You can check how many search contexts are open with the
|
||||
<<cluster-nodes-stats,nodes stats API>>:
|
||||
|
||||
|
|
|
@ -0,0 +1,34 @@
|
|||
---
|
||||
"bulk without types on an index that has types":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: Typeless APIs were introduced in 7.0.0
|
||||
|
||||
- do:
|
||||
indices.create: # not using include_type_name: false on purpose
|
||||
index: index
|
||||
body:
|
||||
mappings:
|
||||
not_doc:
|
||||
properties:
|
||||
foo:
|
||||
type: "keyword"
|
||||
- do:
|
||||
bulk:
|
||||
refresh: true
|
||||
body:
|
||||
- index:
|
||||
_index: index
|
||||
_id: 0
|
||||
- foo: bar
|
||||
- index:
|
||||
_index: index
|
||||
_id: 1
|
||||
- foo: bar
|
||||
|
||||
- do:
|
||||
count:
|
||||
index: index
|
||||
|
||||
- match: {count: 2}
|
|
@ -0,0 +1,42 @@
|
|||
---
|
||||
"DELETE with typeless API on an index that has types":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: Typeless APIs were introduced in 7.0.0
|
||||
|
||||
- do:
|
||||
indices.create: # not using include_type_name: false on purpose
|
||||
index: index
|
||||
body:
|
||||
mappings:
|
||||
not_doc:
|
||||
properties:
|
||||
foo:
|
||||
type: "keyword"
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: index
|
||||
type: not_doc
|
||||
id: 1
|
||||
body: { foo: bar }
|
||||
|
||||
- do:
|
||||
catch: bad_request
|
||||
delete:
|
||||
index: index
|
||||
type: some_random_type
|
||||
id: 1
|
||||
|
||||
- match: { error.root_cause.0.reason: "/Rejecting.mapping.update.to.\\[index\\].as.the.final.mapping.would.have.more.than.1.type.*/" }
|
||||
|
||||
- do:
|
||||
delete:
|
||||
index: index
|
||||
id: 1
|
||||
|
||||
- match: { _index: "index" }
|
||||
- match: { _type: "_doc" }
|
||||
- match: { _id: "1"}
|
||||
- match: { _version: 2}
|
|
@ -0,0 +1,56 @@
|
|||
---
|
||||
"Explain with typeless API on an index that has types":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: Typeless APIs were introduced in 7.0.0
|
||||
|
||||
- do:
|
||||
indices.create: # not using include_type_name: false on purpose
|
||||
index: index
|
||||
body:
|
||||
mappings:
|
||||
not_doc:
|
||||
properties:
|
||||
foo:
|
||||
type: "keyword"
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: index
|
||||
type: not_doc
|
||||
id: 1
|
||||
body: { foo: bar }
|
||||
|
||||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
catch: missing
|
||||
explain:
|
||||
index: index
|
||||
type: some_random_type
|
||||
id: 1
|
||||
body:
|
||||
query:
|
||||
match_all: {}
|
||||
|
||||
- match: { _index: "index" }
|
||||
- match: { _type: "some_random_type" }
|
||||
- match: { _id: "1"}
|
||||
- match: { matched: false}
|
||||
|
||||
- do:
|
||||
explain:
|
||||
index: index
|
||||
type: _doc #todo: make _explain typeless and remove this
|
||||
id: 1
|
||||
body:
|
||||
query:
|
||||
match_all: {}
|
||||
|
||||
- match: { _index: "index" }
|
||||
- match: { _type: "_doc" }
|
||||
- match: { _id: "1"}
|
||||
- is_true: matched
|
||||
- match: { explanation.value: 1 }
|
|
@ -0,0 +1,46 @@
|
|||
---
|
||||
"GET with typeless API on an index that has types":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: Typeless APIs were introduced in 7.0.0
|
||||
|
||||
- do:
|
||||
indices.create: # not using include_type_name: false on purpose
|
||||
index: index
|
||||
body:
|
||||
mappings:
|
||||
not_doc:
|
||||
properties:
|
||||
foo:
|
||||
type: "keyword"
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: index
|
||||
type: not_doc
|
||||
id: 1
|
||||
body: { foo: bar }
|
||||
|
||||
- do:
|
||||
catch: missing
|
||||
get:
|
||||
index: index
|
||||
type: some_random_type
|
||||
id: 1
|
||||
|
||||
- match: { _index: "index" }
|
||||
- match: { _type: "some_random_type" }
|
||||
- match: { _id: "1"}
|
||||
- match: { found: false}
|
||||
|
||||
- do:
|
||||
get:
|
||||
index: index
|
||||
id: 1
|
||||
|
||||
- match: { _index: "index" }
|
||||
- match: { _type: "_doc" }
|
||||
- match: { _id: "1"}
|
||||
- match: { _version: 1}
|
||||
- match: { _source: { foo: bar }}
|
|
@ -0,0 +1,62 @@
|
|||
---
|
||||
"Index with typeless API on an index that has types":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: Typeless APIs were introduced in 7.0.0
|
||||
|
||||
- do:
|
||||
indices.create: # not using include_type_name: false on purpose
|
||||
index: index
|
||||
body:
|
||||
mappings:
|
||||
not_doc:
|
||||
properties:
|
||||
foo:
|
||||
type: "keyword"
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: index
|
||||
id: 1
|
||||
body: { foo: bar }
|
||||
|
||||
- match: { _index: "index" }
|
||||
- match: { _type: "_doc" }
|
||||
- match: { _id: "1"}
|
||||
- match: { _version: 1}
|
||||
|
||||
- do:
|
||||
get: # not using typeless API on purpose
|
||||
index: index
|
||||
type: not_doc
|
||||
id: 1
|
||||
|
||||
- match: { _index: "index" }
|
||||
- match: { _type: "not_doc" } # the important bit to check
|
||||
- match: { _id: "1"}
|
||||
- match: { _version: 1}
|
||||
- match: { _source: { foo: bar }}
|
||||
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: index
|
||||
body: { foo: bar }
|
||||
|
||||
- match: { _index: "index" }
|
||||
- match: { _type: "_doc" }
|
||||
- match: { _version: 1}
|
||||
- set: { _id: id }
|
||||
|
||||
- do:
|
||||
get: # using typeful API on purpose
|
||||
index: index
|
||||
type: not_doc
|
||||
id: '$id'
|
||||
|
||||
- match: { _index: "index" }
|
||||
- match: { _type: "not_doc" } # the important bit to check
|
||||
- match: { _id: $id}
|
||||
- match: { _version: 1}
|
||||
- match: { _source: { foo: bar }}
|
|
@ -0,0 +1,23 @@
|
|||
---
|
||||
"GET mapping with typeless API on an index that has types":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: include_type_name was introduced in 7.0.0
|
||||
|
||||
- do:
|
||||
indices.create: # not using include_type_name: false on purpose
|
||||
index: index
|
||||
body:
|
||||
mappings:
|
||||
not_doc:
|
||||
properties:
|
||||
foo:
|
||||
type: "keyword"
|
||||
|
||||
- do:
|
||||
indices.get_mapping:
|
||||
include_type_name: false
|
||||
index: index
|
||||
|
||||
- match: { index.mappings.properties.foo.type: "keyword" }
|
|
@ -0,0 +1,52 @@
|
|||
---
|
||||
"PUT mapping with typeless API on an index that has types":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: include_type_name was introduced in 7.0.0
|
||||
|
||||
- do:
|
||||
indices.create: # not using include_type_name: false on purpose
|
||||
index: index
|
||||
body:
|
||||
mappings:
|
||||
not_doc:
|
||||
properties:
|
||||
foo:
|
||||
type: "keyword"
|
||||
|
||||
- do:
|
||||
indices.put_mapping:
|
||||
include_type_name: false
|
||||
index: index
|
||||
body:
|
||||
properties:
|
||||
bar:
|
||||
type: "long"
|
||||
|
||||
- do:
|
||||
indices.get_mapping:
|
||||
include_type_name: false
|
||||
index: index
|
||||
|
||||
- match: { index.mappings.properties.foo.type: "keyword" }
|
||||
- match: { index.mappings.properties.bar.type: "long" }
|
||||
|
||||
- do:
|
||||
indices.put_mapping:
|
||||
include_type_name: false
|
||||
index: index
|
||||
body:
|
||||
properties:
|
||||
foo:
|
||||
type: "keyword" # also test no-op updates that trigger special logic wrt the mapping version
|
||||
|
||||
- do:
|
||||
catch: bad_request
|
||||
indices.put_mapping:
|
||||
index: index
|
||||
body:
|
||||
some_other_type:
|
||||
properties:
|
||||
bar:
|
||||
type: "long"
|
|
@ -0,0 +1,32 @@
|
|||
---
|
||||
"mtermvectors without types on an index that has types":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: Typeless APIs were introduced in 7.0.0
|
||||
|
||||
- do:
|
||||
indices.create: # not using include_type_name: false on purpose
|
||||
index: index
|
||||
body:
|
||||
mappings:
|
||||
not_doc:
|
||||
properties:
|
||||
foo:
|
||||
type : "text"
|
||||
term_vector : "with_positions_offsets"
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: index
|
||||
id: 1
|
||||
body: { foo: bar }
|
||||
|
||||
- do:
|
||||
mtermvectors:
|
||||
body:
|
||||
docs:
|
||||
- _index: index
|
||||
_id: 1
|
||||
|
||||
- match: {docs.0.term_vectors.foo.terms.bar.term_freq: 1}
|
|
@ -0,0 +1,45 @@
|
|||
---
|
||||
"Term vectors with typeless API on an index that has types":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: Typeless APIs were introduced in 7.0.0
|
||||
|
||||
- do:
|
||||
indices.create: # not using include_type_name: false on purpose
|
||||
index: index
|
||||
body:
|
||||
mappings:
|
||||
not_doc:
|
||||
properties:
|
||||
foo:
|
||||
type: "text"
|
||||
term_vector: "with_positions"
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: index
|
||||
type: not_doc
|
||||
id: 1
|
||||
body: { foo: bar }
|
||||
|
||||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
termvectors:
|
||||
index: index
|
||||
type: _doc # todo: remove when termvectors support typeless API
|
||||
id: 1
|
||||
|
||||
- is_true: found
|
||||
- match: {_type: _doc}
|
||||
- match: {term_vectors.foo.terms.bar.term_freq: 1}
|
||||
|
||||
- do:
|
||||
termvectors:
|
||||
index: index
|
||||
type: some_random_type
|
||||
id: 1
|
||||
|
||||
- is_false: found
|
|
@ -0,0 +1,39 @@
|
|||
---
|
||||
"Update with typeless API on an index that has types":
|
||||
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: Typeless APIs were introduced in 7.0.0
|
||||
|
||||
- do:
|
||||
indices.create: # not using include_type_name: false on purpose
|
||||
index: index
|
||||
body:
|
||||
mappings:
|
||||
not_doc:
|
||||
properties:
|
||||
foo:
|
||||
type: "keyword"
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: index
|
||||
type: not_doc
|
||||
id: 1
|
||||
body: { foo: bar }
|
||||
|
||||
- do:
|
||||
update:
|
||||
index: index
|
||||
id: 1
|
||||
body:
|
||||
doc:
|
||||
foo: baz
|
||||
|
||||
- do:
|
||||
get:
|
||||
index: index
|
||||
type: not_doc
|
||||
id: 1
|
||||
|
||||
- match: { _source.foo: baz }
|
|
@ -36,6 +36,7 @@ import java.util.ArrayList;
|
|||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Objects;
|
||||
|
||||
public class Version implements Comparable<Version>, ToXContentFragment {
|
||||
/*
|
||||
|
@ -192,7 +193,30 @@ public class Version implements Comparable<Version>, ToXContentFragment {
|
|||
case V_EMPTY_ID:
|
||||
return V_EMPTY;
|
||||
default:
|
||||
return new Version(id, org.apache.lucene.util.Version.LATEST);
|
||||
// We need at least the major of the Lucene version to be correct.
|
||||
// Our best guess is to use the same Lucene version as the previous
|
||||
// version in the list, assuming that it didn't change. This is at
|
||||
// least correct for patch versions of known minors since we never
|
||||
// update the Lucene dependency for patch versions.
|
||||
List<Version> versions = DeclaredVersionsHolder.DECLARED_VERSIONS;
|
||||
Version tmp = new Version(id, org.apache.lucene.util.Version.LATEST);
|
||||
int index = Collections.binarySearch(versions, tmp);
|
||||
if (index < 0) {
|
||||
index = -2 - index;
|
||||
} else {
|
||||
assert false : "Version [" + tmp + "] is declared but absent from the switch statement in Version#fromId";
|
||||
}
|
||||
final org.apache.lucene.util.Version luceneVersion;
|
||||
if (index == -1) {
|
||||
// this version is older than any supported version, so we
|
||||
// assume it is the previous major to the oldest Lucene version
|
||||
// that we know about
|
||||
luceneVersion = org.apache.lucene.util.Version.fromBits(
|
||||
versions.get(0).luceneVersion.major - 1, 0, 0);
|
||||
} else {
|
||||
luceneVersion = versions.get(index).luceneVersion;
|
||||
}
|
||||
return new Version(id, luceneVersion);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -300,7 +324,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
|
|||
this.minor = (byte) ((id / 10000) % 100);
|
||||
this.revision = (byte) ((id / 100) % 100);
|
||||
this.build = (byte) (id % 100);
|
||||
this.luceneVersion = luceneVersion;
|
||||
this.luceneVersion = Objects.requireNonNull(luceneVersion);
|
||||
}
|
||||
|
||||
public boolean after(Version version) {
|
||||
|
|
|
@ -27,7 +27,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
|
|||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MappingMetaData;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
@ -77,14 +76,14 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction<Ty
|
|||
return;
|
||||
}
|
||||
|
||||
ImmutableOpenMap<String, MappingMetaData> mappings = state.metaData().getIndices().get(concreteIndex).getMappings();
|
||||
if (mappings.isEmpty()) {
|
||||
MappingMetaData mapping = state.metaData().getIndices().get(concreteIndex).mapping();
|
||||
if (mapping == null) {
|
||||
listener.onResponse(new TypesExistsResponse(false));
|
||||
return;
|
||||
}
|
||||
|
||||
for (String type : request.types()) {
|
||||
if (!mappings.containsKey(type)) {
|
||||
if (mapping.type().equals(type) == false) {
|
||||
listener.onResponse(new TypesExistsResponse(false));
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -334,8 +334,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
case DELETE:
|
||||
docWriteRequest.routing(metaData.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index()));
|
||||
// check if routing is required, if so, throw error if routing wasn't specified
|
||||
if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName(),
|
||||
docWriteRequest.type())) {
|
||||
if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName())) {
|
||||
throw new RoutingMissingException(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id());
|
||||
}
|
||||
break;
|
||||
|
|
|
@ -30,11 +30,15 @@ import org.elasticsearch.cluster.ClusterState;
|
|||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.get.GetResult;
|
||||
import org.elasticsearch.index.mapper.IdFieldMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
|
@ -83,7 +87,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
|
|||
request.request().index());
|
||||
request.request().filteringAlias(aliasFilter);
|
||||
// Fail fast on the node that received the request.
|
||||
if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) {
|
||||
if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex())) {
|
||||
throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id());
|
||||
}
|
||||
}
|
||||
|
@ -104,15 +108,19 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
|
|||
|
||||
@Override
|
||||
protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException {
|
||||
String[] types;
|
||||
if (MapperService.SINGLE_MAPPING_NAME.equals(request.type())) { // typeless explain call
|
||||
types = Strings.EMPTY_ARRAY;
|
||||
} else {
|
||||
types = new String[] { request.type() };
|
||||
}
|
||||
ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
|
||||
new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
|
||||
types, request.nowInMillis, request.filteringAlias());
|
||||
SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT);
|
||||
Engine.GetResult result = null;
|
||||
try {
|
||||
Term uidTerm = context.mapperService().createUidTerm(request.type(), request.id());
|
||||
if (uidTerm == null) {
|
||||
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
|
||||
}
|
||||
// No need to check the type, IndexShard#get does it for us
|
||||
Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(request.id()));
|
||||
result = context.indexShard().get(new Engine.Get(false, false, request.type(), request.id(), uidTerm));
|
||||
if (!result.exists()) {
|
||||
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
|
||||
|
|
|
@ -71,7 +71,7 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
|
|||
// update the routing (request#index here is possibly an alias)
|
||||
request.request().routing(state.metaData().resolveIndexRouting(request.request().routing(), request.request().index()));
|
||||
// Fail fast on the node that received the request.
|
||||
if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) {
|
||||
if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex())) {
|
||||
throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -69,7 +69,7 @@ public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequ
|
|||
concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item).getName();
|
||||
|
||||
item.routing(clusterState.metaData().resolveIndexRouting(item.routing(), item.index()));
|
||||
if ((item.routing() == null) && (clusterState.getMetaData().routingRequired(concreteSingleIndex, item.type()))) {
|
||||
if ((item.routing() == null) && (clusterState.getMetaData().routingRequired(concreteSingleIndex))) {
|
||||
responses.set(i, newItemFailure(concreteSingleIndex, item.type(), item.id(),
|
||||
new RoutingMissingException(concreteSingleIndex, item.type(), item.id())));
|
||||
continue;
|
||||
|
|
|
@ -29,6 +29,7 @@ import org.elasticsearch.common.Nullable;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
|
@ -101,6 +102,7 @@ public class MultiTermVectorsRequest extends ActionRequest
|
|||
throw new IllegalArgumentException("docs array element should include an object");
|
||||
}
|
||||
TermVectorsRequest termVectorsRequest = new TermVectorsRequest(template);
|
||||
termVectorsRequest.type(MapperService.SINGLE_MAPPING_NAME);
|
||||
TermVectorsRequest.parseRequest(termVectorsRequest, parser);
|
||||
add(termVectorsRequest);
|
||||
}
|
||||
|
|
|
@ -75,7 +75,7 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction<Mult
|
|||
}
|
||||
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName();
|
||||
if (termVectorsRequest.routing() == null &&
|
||||
clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) {
|
||||
clusterState.getMetaData().routingRequired(concreteSingleIndex)) {
|
||||
responses.set(i, new MultiTermVectorsItemResponse(null,
|
||||
new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(),
|
||||
new RoutingMissingException(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id()))));
|
||||
|
|
|
@ -79,7 +79,7 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
|
|||
// update the routing (request#index here is possibly an alias or a parent)
|
||||
request.request().routing(state.metaData().resolveIndexRouting(request.request().routing(), request.request().index()));
|
||||
// Fail fast on the node that received the request.
|
||||
if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) {
|
||||
if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex())) {
|
||||
throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -106,7 +106,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
|
|||
public static void resolveAndValidateRouting(MetaData metaData, String concreteIndex, UpdateRequest request) {
|
||||
request.routing((metaData.resolveWriteIndexRouting(request.routing(), request.index())));
|
||||
// Fail fast on the node that received the request, rather than failing when translating on the index or delete request.
|
||||
if (request.routing() == null && metaData.routingRequired(concreteIndex, request.type())) {
|
||||
if (request.routing() == null && metaData.routingRequired(concreteIndex)) {
|
||||
throw new RoutingMissingException(concreteIndex, request.type(), request.id());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -370,39 +370,6 @@ public interface Client extends ElasticsearchClient, Releasable {
|
|||
*/
|
||||
TermVectorsRequestBuilder prepareTermVectors(String index, String type, String id);
|
||||
|
||||
/**
|
||||
* An action that returns the term vectors for a specific document.
|
||||
*
|
||||
* @param request The term vector request
|
||||
* @return The response future
|
||||
*/
|
||||
@Deprecated
|
||||
ActionFuture<TermVectorsResponse> termVector(TermVectorsRequest request);
|
||||
|
||||
/**
|
||||
* An action that returns the term vectors for a specific document.
|
||||
*
|
||||
* @param request The term vector request
|
||||
*/
|
||||
@Deprecated
|
||||
void termVector(TermVectorsRequest request, ActionListener<TermVectorsResponse> listener);
|
||||
|
||||
/**
|
||||
* Builder for the term vector request.
|
||||
*/
|
||||
@Deprecated
|
||||
TermVectorsRequestBuilder prepareTermVector();
|
||||
|
||||
/**
|
||||
* Builder for the term vector request.
|
||||
*
|
||||
* @param index The index to load the document from
|
||||
* @param type The type of the document
|
||||
* @param id The id of the document
|
||||
*/
|
||||
@Deprecated
|
||||
TermVectorsRequestBuilder prepareTermVector(String index, String type, String id);
|
||||
|
||||
/**
|
||||
* Multi get term vectors.
|
||||
*/
|
||||
|
|
|
@ -581,30 +581,6 @@ public abstract class AbstractClient implements Client {
|
|||
return new TermVectorsRequestBuilder(this, TermVectorsAction.INSTANCE, index, type, id);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
@Override
|
||||
public ActionFuture<TermVectorsResponse> termVector(final TermVectorsRequest request) {
|
||||
return termVectors(request);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
@Override
|
||||
public void termVector(final TermVectorsRequest request, final ActionListener<TermVectorsResponse> listener) {
|
||||
termVectors(request, listener);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
@Override
|
||||
public TermVectorsRequestBuilder prepareTermVector() {
|
||||
return prepareTermVectors();
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
@Override
|
||||
public TermVectorsRequestBuilder prepareTermVector(String index, String type, String id) {
|
||||
return prepareTermVectors(index, type, id);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ActionFuture<MultiTermVectorsResponse> multiTermVectors(final MultiTermVectorsRequest request) {
|
||||
return execute(MultiTermVectorsAction.INSTANCE, request);
|
||||
|
|
|
@ -449,13 +449,37 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
|
|||
return this.aliases;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return an object that maps each type to the associated mappings.
|
||||
* The return value is never {@code null} but may be empty if the index
|
||||
* has no mappings.
|
||||
* @deprecated Use {@link #mapping()} instead now that indices have a single type
|
||||
*/
|
||||
@Deprecated
|
||||
public ImmutableOpenMap<String, MappingMetaData> getMappings() {
|
||||
return mappings;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the concrete mapping for this index or {@code null} if this index has no mappings at all.
|
||||
*/
|
||||
@Nullable
|
||||
public MappingMetaData mapping(String mappingType) {
|
||||
return mappings.get(mappingType);
|
||||
public MappingMetaData mapping() {
|
||||
for (ObjectObjectCursor<String, MappingMetaData> cursor : mappings) {
|
||||
if (cursor.key.equals(MapperService.DEFAULT_MAPPING) == false) {
|
||||
return cursor.value;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the default mapping.
|
||||
* NOTE: this is always {@code null} for 7.x indices which are disallowed to have a default mapping.
|
||||
*/
|
||||
@Nullable
|
||||
public MappingMetaData defaultMapping() {
|
||||
return mappings.get(MapperService.DEFAULT_MAPPING);
|
||||
}
|
||||
|
||||
public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid";
|
||||
|
|
|
@ -742,13 +742,12 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
|
|||
|
||||
/**
|
||||
* @param concreteIndex The concrete index to check if routing is required
|
||||
* @param type The type to check if routing is required
|
||||
* @return Whether routing is required according to the mapping for the specified index and type
|
||||
*/
|
||||
public boolean routingRequired(String concreteIndex, String type) {
|
||||
public boolean routingRequired(String concreteIndex) {
|
||||
IndexMetaData indexMetaData = indices.get(concreteIndex);
|
||||
if (indexMetaData != null) {
|
||||
MappingMetaData mappingMetaData = indexMetaData.getMappings().get(type);
|
||||
MappingMetaData mappingMetaData = indexMetaData.mapping();
|
||||
if (mappingMetaData != null) {
|
||||
return mappingMetaData.routing().required();
|
||||
}
|
||||
|
|
|
@ -263,7 +263,7 @@ public class MetaDataMappingService {
|
|||
updateList.add(indexMetaData);
|
||||
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
|
||||
DocumentMapper newMapper;
|
||||
DocumentMapper existingMapper = mapperService.documentMapper(request.type());
|
||||
DocumentMapper existingMapper = mapperService.documentMapper();
|
||||
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
|
||||
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
|
||||
newMapper = mapperService.parse(request.type(), mappingUpdateSource, false);
|
||||
|
@ -295,12 +295,22 @@ public class MetaDataMappingService {
|
|||
// we use the exact same indexService and metadata we used to validate above here to actually apply the update
|
||||
final Index index = indexMetaData.getIndex();
|
||||
final MapperService mapperService = indexMapperServices.get(index);
|
||||
String typeForUpdate = mappingType; // the type to use to apply the mapping update
|
||||
if (MapperService.SINGLE_MAPPING_NAME.equals(typeForUpdate)) {
|
||||
// If the user gave _doc as a special type value or if (s)he is using the new typeless APIs,
|
||||
// then we apply the mapping update to the existing type. This allows moving to typeless
|
||||
// APIs with indices whose type name is different from `_doc`.
|
||||
DocumentMapper mapper = mapperService.documentMapper();
|
||||
if (mapper != null) {
|
||||
typeForUpdate = mapper.type();
|
||||
}
|
||||
}
|
||||
CompressedXContent existingSource = null;
|
||||
DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
|
||||
DocumentMapper existingMapper = mapperService.documentMapper(typeForUpdate);
|
||||
if (existingMapper != null) {
|
||||
existingSource = existingMapper.mappingSource();
|
||||
}
|
||||
DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE);
|
||||
DocumentMapper mergedMapper = mapperService.merge(typeForUpdate, mappingUpdateSource, MergeReason.MAPPING_UPDATE);
|
||||
CompressedXContent updatedSource = mergedMapper.mappingSource();
|
||||
|
||||
if (existingSource != null) {
|
||||
|
|
|
@ -397,6 +397,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
|||
SearchService.MAX_KEEPALIVE_SETTING,
|
||||
MultiBucketConsumerService.MAX_BUCKET_SETTING,
|
||||
SearchService.LOW_LEVEL_CANCELLATION_SETTING,
|
||||
SearchService.MAX_OPEN_SCROLL_CONTEXT,
|
||||
Node.WRITE_PORTS_FILE_SETTING,
|
||||
Node.NODE_NAME_SETTING,
|
||||
Node.NODE_DATA_SETTING,
|
||||
|
|
|
@ -2108,8 +2108,9 @@ public class InternalEngine extends Engine {
|
|||
// Give us the opportunity to upgrade old segments while performing
|
||||
// background merges
|
||||
MergePolicy mergePolicy = config().getMergePolicy();
|
||||
// always configure soft-deletes field so an engine with soft-deletes disabled can open a Lucene index with soft-deletes.
|
||||
iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
|
||||
if (softDeleteEnabled) {
|
||||
iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
|
||||
mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME, softDeletesPolicy::getRetentionQuery,
|
||||
new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy));
|
||||
}
|
||||
|
|
|
@ -39,10 +39,12 @@ import org.elasticsearch.index.engine.Engine;
|
|||
import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor;
|
||||
import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.IdFieldMapper;
|
||||
import org.elasticsearch.index.mapper.Mapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.RoutingFieldMapper;
|
||||
import org.elasticsearch.index.mapper.SourceFieldMapper;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
|
||||
|
@ -157,13 +159,11 @@ public final class ShardGetService extends AbstractIndexShardComponent {
|
|||
|
||||
Engine.GetResult get = null;
|
||||
if (type != null) {
|
||||
Term uidTerm = mapperService.createUidTerm(type, id);
|
||||
if (uidTerm != null) {
|
||||
get = indexShard.get(new Engine.Get(realtime, readFromTranslog, type, id, uidTerm)
|
||||
.version(version).versionType(versionType));
|
||||
if (get.exists() == false) {
|
||||
get.close();
|
||||
}
|
||||
Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(id));
|
||||
get = indexShard.get(new Engine.Get(realtime, readFromTranslog, type, id, uidTerm)
|
||||
.version(version).versionType(versionType));
|
||||
if (get.exists() == false) {
|
||||
get.close();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -202,7 +202,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
|
|||
}
|
||||
}
|
||||
|
||||
DocumentMapper docMapper = mapperService.documentMapper(type);
|
||||
DocumentMapper docMapper = mapperService.documentMapper();
|
||||
|
||||
if (gFields != null && gFields.length > 0) {
|
||||
for (String field : gFields) {
|
||||
|
|
|
@ -106,7 +106,8 @@ final class DocumentParser {
|
|||
throw new IllegalArgumentException("It is forbidden to index into the default mapping [" + MapperService.DEFAULT_MAPPING + "]");
|
||||
}
|
||||
|
||||
if (Objects.equals(source.type(), docMapper.type()) == false) {
|
||||
if (Objects.equals(source.type(), docMapper.type()) == false &&
|
||||
MapperService.SINGLE_MAPPING_NAME.equals(source.type()) == false) { // used by typeless APIs
|
||||
throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type ["
|
||||
+ docMapper.type() + "]");
|
||||
}
|
||||
|
|
|
@ -25,7 +25,6 @@ import org.apache.logging.log4j.LogManager;
|
|||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.elasticsearch.Assertions;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
|
@ -218,7 +217,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
|
||||
for (DocumentMapper documentMapper : updatedEntries.values()) {
|
||||
String mappingType = documentMapper.type();
|
||||
CompressedXContent incomingMappingSource = newIndexMetaData.mapping(mappingType).source();
|
||||
MappingMetaData mappingMetaData;
|
||||
if (mappingType.equals(MapperService.DEFAULT_MAPPING)) {
|
||||
mappingMetaData = newIndexMetaData.defaultMapping();
|
||||
} else {
|
||||
mappingMetaData = newIndexMetaData.mapping();
|
||||
assert mappingType.equals(mappingMetaData.type());
|
||||
}
|
||||
CompressedXContent incomingMappingSource = mappingMetaData.source();
|
||||
|
||||
String op = existingMappers.contains(mappingType) ? "updated" : "added";
|
||||
if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) {
|
||||
|
@ -254,13 +260,25 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
if (currentIndexMetaData.getMappingVersion() == newIndexMetaData.getMappingVersion()) {
|
||||
// if the mapping version is unchanged, then there should not be any updates and all mappings should be the same
|
||||
assert updatedEntries.isEmpty() : updatedEntries;
|
||||
for (final ObjectCursor<MappingMetaData> mapping : newIndexMetaData.getMappings().values()) {
|
||||
final CompressedXContent currentSource = currentIndexMetaData.mapping(mapping.value.type()).source();
|
||||
final CompressedXContent newSource = mapping.value.source();
|
||||
|
||||
MappingMetaData defaultMapping = newIndexMetaData.defaultMapping();
|
||||
if (defaultMapping != null) {
|
||||
final CompressedXContent currentSource = currentIndexMetaData.defaultMapping().source();
|
||||
final CompressedXContent newSource = defaultMapping.source();
|
||||
assert currentSource.equals(newSource) :
|
||||
"expected current mapping [" + currentSource + "] for type [" + mapping.value.type() + "] "
|
||||
"expected current mapping [" + currentSource + "] for type [" + defaultMapping.type() + "] "
|
||||
+ "to be the same as new mapping [" + newSource + "]";
|
||||
}
|
||||
|
||||
MappingMetaData mapping = newIndexMetaData.mapping();
|
||||
if (mapping != null) {
|
||||
final CompressedXContent currentSource = currentIndexMetaData.mapping().source();
|
||||
final CompressedXContent newSource = mapping.source();
|
||||
assert currentSource.equals(newSource) :
|
||||
"expected current mapping [" + currentSource + "] for type [" + mapping.type() + "] "
|
||||
+ "to be the same as new mapping [" + newSource + "]";
|
||||
}
|
||||
|
||||
} else {
|
||||
// if the mapping version is changed, it should increase, there should be updates, and the mapping should be different
|
||||
final long currentMappingVersion = currentIndexMetaData.getMappingVersion();
|
||||
|
@ -270,7 +288,13 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
+ "to be less than new mapping version [" + newMappingVersion + "]";
|
||||
assert updatedEntries.isEmpty() == false;
|
||||
for (final DocumentMapper documentMapper : updatedEntries.values()) {
|
||||
final MappingMetaData currentMapping = currentIndexMetaData.mapping(documentMapper.type());
|
||||
final MappingMetaData currentMapping;
|
||||
if (documentMapper.type().equals(MapperService.DEFAULT_MAPPING)) {
|
||||
currentMapping = currentIndexMetaData.defaultMapping();
|
||||
} else {
|
||||
currentMapping = currentIndexMetaData.mapping();
|
||||
assert currentMapping == null || documentMapper.type().equals(currentMapping.type());
|
||||
}
|
||||
if (currentMapping != null) {
|
||||
final CompressedXContent currentSource = currentMapping.source();
|
||||
final CompressedXContent newSource = documentMapper.mappingSource();
|
||||
|
@ -766,11 +790,4 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
}
|
||||
}
|
||||
|
||||
/** Return a term that uniquely identifies the document, or {@code null} if the type is not allowed. */
|
||||
public Term createUidTerm(String type, String id) {
|
||||
if (mapper == null || mapper.type().equals(type) == false) {
|
||||
return null;
|
||||
}
|
||||
return new Term(IdFieldMapper.NAME, Uid.encodeId(id));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,7 +31,6 @@ import org.apache.lucene.search.ReferenceManager;
|
|||
import org.apache.lucene.search.Sort;
|
||||
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.ThreadInterruptedException;
|
||||
import org.elasticsearch.Assertions;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
|
@ -63,6 +62,7 @@ import org.elasticsearch.common.unit.TimeValue;
|
|||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.AsyncIOProcessor;
|
||||
import org.elasticsearch.common.util.concurrent.RunOnce;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.core.internal.io.IOUtils;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
@ -77,6 +77,7 @@ import org.elasticsearch.index.cache.request.ShardRequestCache;
|
|||
import org.elasticsearch.index.codec.CodecService;
|
||||
import org.elasticsearch.index.engine.CommitStats;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.engine.Engine.GetResult;
|
||||
import org.elasticsearch.index.engine.EngineConfig;
|
||||
import org.elasticsearch.index.engine.EngineException;
|
||||
import org.elasticsearch.index.engine.EngineFactory;
|
||||
|
@ -548,7 +549,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
} catch (final AlreadyClosedException e) {
|
||||
// okay, the index was deleted
|
||||
}
|
||||
});
|
||||
}, null);
|
||||
}
|
||||
}
|
||||
// set this last, once we finished updating all internal state.
|
||||
|
@ -815,23 +816,23 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
} catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) {
|
||||
return new Engine.DeleteResult(e, version, operationPrimaryTerm, seqNo, false);
|
||||
}
|
||||
final Term uid = extractUidForDelete(type, id);
|
||||
if (resolveType(type).equals(mapperService.documentMapper().type()) == false) {
|
||||
// We should never get there due to the fact that we generate mapping updates on deletes,
|
||||
// but we still prefer to have a hard exception here as we would otherwise delete a
|
||||
// document in the wrong type.
|
||||
throw new IllegalStateException("Deleting document from type [" + resolveType(type) + "] while current type is [" +
|
||||
mapperService.documentMapper().type() + "]");
|
||||
}
|
||||
final Term uid = new Term(IdFieldMapper.NAME, Uid.encodeId(id));
|
||||
final Engine.Delete delete = prepareDelete(type, id, uid, seqNo, opPrimaryTerm, version,
|
||||
versionType, origin);
|
||||
return delete(getEngine(), delete);
|
||||
}
|
||||
|
||||
private static Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version,
|
||||
private Engine.Delete prepareDelete(String type, String id, Term uid, long seqNo, long primaryTerm, long version,
|
||||
VersionType versionType, Engine.Operation.Origin origin) {
|
||||
long startTime = System.nanoTime();
|
||||
return new Engine.Delete(type, id, uid, seqNo, primaryTerm, version, versionType, origin, startTime);
|
||||
}
|
||||
|
||||
private Term extractUidForDelete(String type, String id) {
|
||||
// This is only correct because we create types dynamically on delete operations
|
||||
// otherwise this could match the same _id from a different type
|
||||
BytesRef idBytes = Uid.encodeId(id);
|
||||
return new Term(IdFieldMapper.NAME, idBytes);
|
||||
return new Engine.Delete(resolveType(type), id, uid, seqNo, primaryTerm, version, versionType, origin, startTime);
|
||||
}
|
||||
|
||||
private Engine.DeleteResult delete(Engine engine, Engine.Delete delete) throws IOException {
|
||||
|
@ -853,6 +854,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
|
||||
public Engine.GetResult get(Engine.Get get) {
|
||||
readAllowed();
|
||||
DocumentMapper mapper = mapperService.documentMapper();
|
||||
if (mapper == null || mapper.type().equals(resolveType(get.type())) == false) {
|
||||
return GetResult.NOT_EXISTS;
|
||||
}
|
||||
return getEngine().get(get, this::acquireSearcher);
|
||||
}
|
||||
|
||||
|
@ -2273,8 +2278,23 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* If an index/update/get/delete operation is using the special `_doc` type, then we replace
|
||||
* it with the actual type that is being used in the mappings so that users may use typeless
|
||||
* APIs with indices that have types.
|
||||
*/
|
||||
private String resolveType(String type) {
|
||||
if (MapperService.SINGLE_MAPPING_NAME.equals(type)) {
|
||||
DocumentMapper docMapper = mapperService.documentMapper();
|
||||
if (docMapper != null) {
|
||||
return docMapper.type();
|
||||
}
|
||||
}
|
||||
return type;
|
||||
}
|
||||
|
||||
private DocumentMapperForType docMapper(String type) {
|
||||
return mapperService.documentMapperWithAutoCreate(type);
|
||||
return mapperService.documentMapperWithAutoCreate(resolveType(type));
|
||||
}
|
||||
|
||||
private EngineConfig newEngineConfig() {
|
||||
|

@@ -2316,14 +2336,26 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
indexShardOperationPermits.asyncBlockOperations(onPermitAcquired, timeout.duration(), timeout.timeUnit());
}

private <E extends Exception> void bumpPrimaryTerm(final long newPrimaryTerm, final CheckedRunnable<E> onBlocked) {
private <E extends Exception> void bumpPrimaryTerm(final long newPrimaryTerm,
final CheckedRunnable<E> onBlocked,
@Nullable ActionListener<Releasable> combineWithAction) {
assert Thread.holdsLock(mutex);
assert newPrimaryTerm > pendingPrimaryTerm;
assert newPrimaryTerm > pendingPrimaryTerm || (newPrimaryTerm >= pendingPrimaryTerm && combineWithAction != null);
assert operationPrimaryTerm <= pendingPrimaryTerm;
final CountDownLatch termUpdated = new CountDownLatch(1);
indexShardOperationPermits.asyncBlockOperations(new ActionListener<Releasable>() {
@Override
public void onFailure(final Exception e) {
try {
innerFail(e);
} finally {
if (combineWithAction != null) {
combineWithAction.onFailure(e);
}
}
}

private void innerFail(final Exception e) {
try {
failShard("exception during primary term transition", e);
} catch (AlreadyClosedException ace) {

@@ -2333,7 +2365,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl

@Override
public void onResponse(final Releasable releasable) {
try (Releasable ignored = releasable) {
final RunOnce releaseOnce = new RunOnce(releasable::close);
try {
assert operationPrimaryTerm <= pendingPrimaryTerm;
termUpdated.await();
// indexShardOperationPermits doesn't guarantee that async submissions are executed

@@ -2343,7 +2376,17 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
onBlocked.run();
}
} catch (final Exception e) {
onFailure(e);
if (combineWithAction == null) {
// otherwise leave it to combineWithAction to release the permit
releaseOnce.run();
}
innerFail(e);
} finally {
if (combineWithAction != null) {
combineWithAction.onResponse(releasable);
} else {
releaseOnce.run();
}
}
}
}, 30, TimeUnit.MINUTES);

@@ -2371,7 +2414,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
public void acquireReplicaOperationPermit(final long opPrimaryTerm, final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes,
final ActionListener<Releasable> onPermitAcquired, final String executorOnDelay,
final Object debugInfo) {
innerAcquireReplicaOperationPermit(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, onPermitAcquired,
innerAcquireReplicaOperationPermit(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, onPermitAcquired, false,
(listener) -> indexShardOperationPermits.acquire(listener, executorOnDelay, true, debugInfo));
}

@@ -2393,7 +2436,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
final long maxSeqNoOfUpdatesOrDeletes,
final ActionListener<Releasable> onPermitAcquired,
final TimeValue timeout) {
innerAcquireReplicaOperationPermit(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, onPermitAcquired,
innerAcquireReplicaOperationPermit(opPrimaryTerm, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, onPermitAcquired, true,
(listener) -> indexShardOperationPermits.asyncBlockOperations(listener, timeout.duration(), timeout.timeUnit()));
}

@@ -2401,41 +2444,16 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
final long globalCheckpoint,
final long maxSeqNoOfUpdatesOrDeletes,
final ActionListener<Releasable> onPermitAcquired,
final Consumer<ActionListener<Releasable>> consumer) {
final boolean allowCombineOperationWithPrimaryTermUpdate,
final Consumer<ActionListener<Releasable>> operationExecutor) {
verifyNotClosed();
if (opPrimaryTerm > pendingPrimaryTerm) {
synchronized (mutex) {
if (opPrimaryTerm > pendingPrimaryTerm) {
final IndexShardState shardState = state();
// only roll translog and update primary term if shard has made it past recovery
// Having a new primary term here means that the old primary failed and that there is a new primary, which again
// means that the master will fail this shard as all initializing shards are failed when a primary is selected
// We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint
if (shardState != IndexShardState.POST_RECOVERY &&
shardState != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, shardState);
}

if (opPrimaryTerm > pendingPrimaryTerm) {
bumpPrimaryTerm(opPrimaryTerm, () -> {
updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition");
final long currentGlobalCheckpoint = getGlobalCheckpoint();
final long maxSeqNo = seqNoStats().getMaxSeqNo();
logger.info("detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]",
opPrimaryTerm, currentGlobalCheckpoint, maxSeqNo);
if (currentGlobalCheckpoint < maxSeqNo) {
resetEngineToGlobalCheckpoint();
} else {
getEngine().rollTranslogGeneration();
}
});
}
}
}
}
assert opPrimaryTerm <= pendingPrimaryTerm
: "operation primary term [" + opPrimaryTerm + "] should be at most [" + pendingPrimaryTerm + "]";
consumer.accept(new ActionListener<Releasable>() {
// This listener is used for the execution of the operation. If the operation requires all the permits for its
// execution and the primary term must be updated first, we can combine the operation execution with the
// primary term update. Since indexShardOperationPermits doesn't guarantee that async submissions are executed
// in the order submitted, combining both operations ensure that the term is updated before the operation is
// executed. It also has the side effect of acquiring all the permits one time instead of two.
final ActionListener<Releasable> operationListener = new ActionListener<Releasable>() {
@Override
public void onResponse(final Releasable releasable) {
if (opPrimaryTerm < operationPrimaryTerm) {

@@ -2465,7 +2483,48 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
public void onFailure(final Exception e) {
onPermitAcquired.onFailure(e);
}
});
};

if (requirePrimaryTermUpdate(opPrimaryTerm, allowCombineOperationWithPrimaryTermUpdate)) {
synchronized (mutex) {
if (requirePrimaryTermUpdate(opPrimaryTerm, allowCombineOperationWithPrimaryTermUpdate)) {
final IndexShardState shardState = state();
// only roll translog and update primary term if shard has made it past recovery
// Having a new primary term here means that the old primary failed and that there is a new primary, which again
// means that the master will fail this shard as all initializing shards are failed when a primary is selected
// We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint
if (shardState != IndexShardState.POST_RECOVERY &&
shardState != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, shardState);
}

bumpPrimaryTerm(opPrimaryTerm, () -> {
updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition");
final long currentGlobalCheckpoint = getGlobalCheckpoint();
final long maxSeqNo = seqNoStats().getMaxSeqNo();
logger.info("detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]",
opPrimaryTerm, currentGlobalCheckpoint, maxSeqNo);
if (currentGlobalCheckpoint < maxSeqNo) {
resetEngineToGlobalCheckpoint();
} else {
getEngine().rollTranslogGeneration();
}
}, allowCombineOperationWithPrimaryTermUpdate ? operationListener : null);

if (allowCombineOperationWithPrimaryTermUpdate) {
logger.debug("operation execution has been combined with primary term update");
return;
}
}
}
}
assert opPrimaryTerm <= pendingPrimaryTerm
: "operation primary term [" + opPrimaryTerm + "] should be at most [" + pendingPrimaryTerm + "]";
operationExecutor.accept(operationListener);
}

private boolean requirePrimaryTermUpdate(final long opPrimaryTerm, final boolean allPermits) {
return (opPrimaryTerm > pendingPrimaryTerm) || (allPermits && opPrimaryTerm > operationPrimaryTerm);
}

public int getActiveOperationsCount() {
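
The long comment above spells out why a replica operation that needs all permits may be folded into the primary term update. The trigger for that path is the requirePrimaryTermUpdate predicate; a standalone sketch of the rule with plain longs follows (illustrative only, not the shard code):

    // Illustrative sketch of the decision described above.
    public final class PrimaryTermUpdateRule {

        /**
         * @param opPrimaryTerm        term carried by the incoming replica operation
         * @param pendingPrimaryTerm   highest term the shard has started moving to
         * @param operationPrimaryTerm term currently applied to operations
         * @param allPermits           true when the operation asked for all permits (blocking acquire)
         */
        static boolean requirePrimaryTermUpdate(long opPrimaryTerm, long pendingPrimaryTerm,
                                                long operationPrimaryTerm, boolean allPermits) {
            return opPrimaryTerm > pendingPrimaryTerm
                || (allPermits && opPrimaryTerm > operationPrimaryTerm);
        }

        public static void main(String[] args) {
            // newer term than anything seen: always update
            System.out.println(requirePrimaryTermUpdate(3, 2, 2, false)); // true
            // term already pending but not yet applied: only an all-permits operation forces the update
            System.out.println(requirePrimaryTermUpdate(3, 3, 2, false)); // false
            System.out.println(requirePrimaryTermUpdate(3, 3, 2, true));  // true
        }
    }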

@@ -149,11 +149,8 @@ final class StoreRecovery {
final long maxSeqNo, final long maxUnsafeAutoIdTimestamp, IndexMetaData indexMetaData, int shardId, boolean split,
boolean hasNested) throws IOException {

// clean target directory (if previous recovery attempt failed) and create a fresh segment file with the proper lucene version
Lucene.cleanLuceneIndex(target);
assert sources.length > 0;
final int luceneIndexCreatedVersionMajor = Lucene.readSegmentInfos(sources[0]).getIndexCreatedVersionMajor();
new SegmentInfos(luceneIndexCreatedVersionMajor).commit(target);

final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target);

@@ -164,7 +161,8 @@ final class StoreRecovery {
// later once we stared it up otherwise we would need to wait for it here
// we also don't specify a codec here and merges should use the engines for this index
.setMergePolicy(NoMergePolicy.INSTANCE)
.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
.setIndexCreatedVersionMajor(luceneIndexCreatedVersionMajor);
if (indexSort != null) {
iwc.setIndexSort(indexSort);
}

@@ -417,7 +415,7 @@ final class StoreRecovery {
logger.debug("failed to list file details", e);
}
} else {
store.createEmpty();
store.createEmpty(indexShard.indexSettings().getIndexVersionCreated().luceneVersion);
final String translogUUID = Translog.createEmptyTranslog(
indexShard.shardPath().resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId,
indexShard.getPendingPrimaryTerm());

@@ -1404,9 +1404,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
/**
* creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted.
*/
public void createEmpty() throws IOException {
public void createEmpty(Version luceneVersion) throws IOException {
metadataLock.writeLock().lock();
try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.CREATE, directory, null)) {
try (IndexWriter writer = newEmptyIndexWriter(directory, luceneVersion)) {
final Map<String, String> map = new HashMap<>();
map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED));

@@ -1443,7 +1443,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
*/
public void bootstrapNewHistory(long maxSeqNo) throws IOException {
metadataLock.writeLock().lock();
try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) {
try (IndexWriter writer = newAppendingIndexWriter(directory, null)) {
final Map<String, String> map = new HashMap<>();
map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo));

@@ -1461,7 +1461,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
*/
public void associateIndexWithNewTranslog(final String translogUUID) throws IOException {
metadataLock.writeLock().lock();
try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) {
try (IndexWriter writer = newAppendingIndexWriter(directory, null)) {
if (translogUUID.equals(getUserData(writer).get(Translog.TRANSLOG_UUID_KEY))) {
throw new IllegalArgumentException("a new translog uuid can't be equal to existing one. got [" + translogUUID + "]");
}

@@ -1480,7 +1480,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
*/
public void ensureIndexHasHistoryUUID() throws IOException {
metadataLock.writeLock().lock();
try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) {
try (IndexWriter writer = newAppendingIndexWriter(directory, null)) {
final Map<String, String> userData = getUserData(writer);
if (userData.containsKey(Engine.HISTORY_UUID_KEY) == false) {
updateCommitData(writer, Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()));

@@ -1546,7 +1546,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
+ translogUUID + "]");
}
if (startingIndexCommit.equals(existingCommits.get(existingCommits.size() - 1)) == false) {
try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, startingIndexCommit)) {
try (IndexWriter writer = newAppendingIndexWriter(directory, startingIndexCommit)) {
// this achieves two things:
// - by committing a new commit based on the starting commit, it make sure the starting commit will be opened
// - deletes any other commit (by lucene standard deletion policy)

@@ -1578,19 +1578,28 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
return userData;
}

private static IndexWriter newIndexWriter(final IndexWriterConfig.OpenMode openMode, final Directory dir, final IndexCommit commit)
throws IOException {
assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit";
IndexWriterConfig iwc = new IndexWriterConfig(null)
.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setCommitOnClose(false)
private static IndexWriter newAppendingIndexWriter(final Directory dir, final IndexCommit commit) throws IOException {
IndexWriterConfig iwc = newIndexWriterConfig()
.setIndexCommit(commit)
// we don't want merges to happen here - we call maybe merge on the engine
// later once we stared it up otherwise we would need to wait for it here
// we also don't specify a codec here and merges should use the engines for this index
.setMergePolicy(NoMergePolicy.INSTANCE)
.setOpenMode(openMode);
.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
return new IndexWriter(dir, iwc);
}

private static IndexWriter newEmptyIndexWriter(final Directory dir, final Version luceneVersion) throws IOException {
IndexWriterConfig iwc = newIndexWriterConfig()
.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
.setIndexCreatedVersionMajor(luceneVersion.major);
return new IndexWriter(dir, iwc);
}

private static IndexWriterConfig newIndexWriterConfig() {
return new IndexWriterConfig(null)
.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setCommitOnClose(false)
// we don't want merges to happen here - we call maybe merge on the engine
// later once we stared it up otherwise we would need to wait for it here
// we also don't specify a codec here and merges should use the engines for this index
.setMergePolicy(NoMergePolicy.INSTANCE);
}

}
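
The Store refactor above splits writer creation into newAppendingIndexWriter and newEmptyIndexWriter so that an empty index can pin the Lucene created-version major. A rough sketch of the same idea with plain Lucene calls follows (assumes a Lucene 7.x or later classpath and a throwaway temp directory; it is not the Store class itself, which also stamps history and translog metadata into the commit):

    // Standalone sketch of writing an empty Lucene index with a pinned created-version major.
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.SegmentInfos;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    import java.nio.file.Files;
    import java.nio.file.Path;

    public final class EmptyIndexSketch {
        public static void main(String[] args) throws Exception {
            Path path = Files.createTempDirectory("empty-index");
            try (Directory directory = FSDirectory.open(path)) {
                IndexWriterConfig iwc = new IndexWriterConfig(null)
                    .setCommitOnClose(false)
                    .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
                    // pretend the index was created by the previous major, as the recovery code does
                    .setIndexCreatedVersionMajor(org.apache.lucene.util.Version.LATEST.major - 1);
                try (IndexWriter writer = new IndexWriter(directory, iwc)) {
                    writer.commit(); // explicit commit since commit-on-close is disabled
                }
                // the created-version major is persisted in the segments file
                System.out.println(SegmentInfos.readLatestCommit(directory).getIndexCreatedVersionMajor());
            }
        }
    }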

@@ -43,6 +43,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.DocumentMapperForType;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;

@@ -50,6 +51,7 @@ import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.mapper.StringFieldType;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.search.dfs.AggregatedDfs;

@@ -82,11 +84,7 @@ public class TermVectorsService {
final long startTime = nanoTimeSupplier.getAsLong();
final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(),
request.type(), request.id());
final Term uidTerm = indexShard.mapperService().createUidTerm(request.type(), request.id());
if (uidTerm == null) {
termVectorsResponse.setExists(false);
return termVectorsResponse;
}
final Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(request.id()));

Fields termVectorsByField = null;
AggregatedDfs dfs = null;

@@ -41,7 +41,6 @@ import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;

@@ -100,6 +99,14 @@ public class RepositoriesService implements ClusterStateApplier {
registrationListener = listener;
}

// Trying to create the new repository on master to make sure it works
try {
closeRepository(createRepository(newRepositoryMetaData));
} catch (Exception e) {
registrationListener.onFailure(e);
return;
}

clusterService.submitStateUpdateTask(request.cause, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, registrationListener) {
@Override
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {

@@ -107,13 +114,8 @@ public class RepositoriesService implements ClusterStateApplier {
}

@Override
public ClusterState execute(ClusterState currentState) throws IOException {
public ClusterState execute(ClusterState currentState) {
ensureRepositoryNotInUse(currentState, request.name);
// Trying to create the new repository on master to make sure it works
if (!registerRepository(newRepositoryMetaData)) {
// The new repository has the same settings as the old one - ignore
return currentState;
}
MetaData metaData = currentState.metaData();
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);

@@ -127,6 +129,10 @@ public class RepositoriesService implements ClusterStateApplier {

for (RepositoryMetaData repositoryMetaData : repositories.repositories()) {
if (repositoryMetaData.name().equals(newRepositoryMetaData.name())) {
if (newRepositoryMetaData.equals(repositoryMetaData)) {
// Previous version is the same as this one no update is needed.
return currentState;
}
found = true;
repositoriesMetaData.add(newRepositoryMetaData);
} else {

@@ -352,37 +358,8 @@ public class RepositoriesService implements ClusterStateApplier {
throw new RepositoryMissingException(repositoryName);
}

/**
* Creates a new repository and adds it to the list of registered repositories.
* <p>
* If a repository with the same name but different types or settings already exists, it will be closed and
* replaced with the new repository. If a repository with the same name exists but it has the same type and settings
* the new repository is ignored.
*
* @param repositoryMetaData new repository metadata
* @return {@code true} if new repository was added or {@code false} if it was ignored
*/
private boolean registerRepository(RepositoryMetaData repositoryMetaData) throws IOException {
Repository previous = repositories.get(repositoryMetaData.name());
if (previous != null) {
RepositoryMetaData previousMetadata = previous.getMetadata();
if (previousMetadata.equals(repositoryMetaData)) {
// Previous version is the same as this one - ignore it
return false;
}
}
Repository newRepo = createRepository(repositoryMetaData);
if (previous != null) {
closeRepository(previous);
}
Map<String, Repository> newRepositories = new HashMap<>(repositories);
newRepositories.put(repositoryMetaData.name(), newRepo);
repositories = newRepositories;
return true;
}

/** Closes the given repository. */
private void closeRepository(Repository repository) throws IOException {
private void closeRepository(Repository repository) {
logger.debug("closing repository [{}][{}]", repository.getMetadata().type(), repository.getMetadata().name());
repository.close();
}
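
The change above validates a new repository by constructing it once on the master and closing it straight away, before the cluster state update is ever submitted; the old registerRepository helper that did this inside the update task is removed. A generic sketch of that validate-then-register shape follows (the Repository and RepositoryFactory types below are illustrative stand-ins, not the Elasticsearch interfaces):

    // Generic validate-then-register sketch of the pattern above.
    import java.util.function.Consumer;

    public final class ValidateThenRegister {

        interface Repository extends AutoCloseable {
            @Override void close();
        }

        interface RepositoryFactory {
            Repository create(String name) throws Exception; // throws if the settings are unusable
        }

        /**
         * Builds the repository once just to prove the settings work, closes it again,
         * and only then hands control to the (asynchronous) registration step.
         */
        static void register(String name, RepositoryFactory factory,
                             Runnable submitClusterStateUpdate, Consumer<Exception> onFailure) {
            try (Repository ignored = factory.create(name)) {
                // creation succeeded; the throwaway instance is closed immediately
            } catch (Exception e) {
                onFailure.accept(e); // fail fast, nothing was submitted
                return;
            }
            submitClusterStateUpdate.run();
        }
    }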

@@ -27,8 +27,6 @@ import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.index.IndexNotFoundException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

@@ -1495,11 +1493,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// version number and no checksum, even though the index itself is perfectly fine to restore, this
// empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty
// shard anyway, we just create the empty shard here and then exit.
IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null)
.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
.setCommitOnClose(true));
writer.close();
store.createEmpty(targetShard.indexSettings().getIndexMetaData().getCreationVersion().luceneVersion);
return;
}

@@ -19,11 +19,9 @@

package org.elasticsearch.rest.action.document;

import org.apache.logging.log4j.LogManager;
import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.VersionType;

@@ -45,19 +43,13 @@ import static org.elasticsearch.rest.RestRequest.Method.POST;
* TermVectorsRequest.
*/
public class RestTermVectorsAction extends BaseRestHandler {
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(
LogManager.getLogger(RestTermVectorsAction.class));

public RestTermVectorsAction(Settings settings, RestController controller) {
super(settings);
controller.registerWithDeprecatedHandler(GET, "/{index}/{type}/_termvectors", this,
GET, "/{index}/{type}/_termvector", deprecationLogger);
controller.registerWithDeprecatedHandler(POST, "/{index}/{type}/_termvectors", this,
POST, "/{index}/{type}/_termvector", deprecationLogger);
controller.registerWithDeprecatedHandler(GET, "/{index}/{type}/{id}/_termvectors", this,
GET, "/{index}/{type}/{id}/_termvector", deprecationLogger);
controller.registerWithDeprecatedHandler(POST, "/{index}/{type}/{id}/_termvectors", this,
POST, "/{index}/{type}/{id}/_termvector", deprecationLogger);
controller.registerHandler(GET, "/{index}/{type}/_termvectors", this);
controller.registerHandler(POST, "/{index}/{type}/_termvectors", this);
controller.registerHandler(GET, "/{index}/{type}/{id}/_termvectors", this);
controller.registerHandler(POST, "/{index}/{type}/{id}/_termvectors", this);
}

@Override

@@ -112,6 +112,7 @@ import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;
import java.util.function.Supplier;

@@ -145,6 +146,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
public static final Setting<Boolean> DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS =
Setting.boolSetting("search.default_allow_partial_results", true, Property.Dynamic, Property.NodeScope);

public static final Setting<Integer> MAX_OPEN_SCROLL_CONTEXT =
Setting.intSetting("search.max_open_scroll_context", 500, 0, Property.Dynamic, Property.NodeScope);

private final ThreadPool threadPool;

@@ -174,6 +178,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv

private volatile boolean lowLevelCancellation;

private volatile int maxOpenScrollContext;

private final Cancellable keepAliveReaper;

private final AtomicLong idGenerator = new AtomicLong();

@@ -182,6 +188,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv

private final MultiBucketConsumerService multiBucketConsumerService;

private final AtomicInteger openScrollContexts = new AtomicInteger();

public SearchService(ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase,
ResponseCollectorService responseCollectorService) {

@@ -212,6 +220,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
clusterService.getClusterSettings().addSettingsUpdateConsumer(DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS,
this::setDefaultAllowPartialSearchResults);

maxOpenScrollContext = MAX_OPEN_SCROLL_CONTEXT.get(settings);
clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_SCROLL_CONTEXT, this::setMaxOpenScrollContext);

lowLevelCancellation = LOW_LEVEL_CANCELLATION_SETTING.get(settings);
clusterService.getClusterSettings().addSettingsUpdateConsumer(LOW_LEVEL_CANCELLATION_SETTING, this::setLowLevelCancellation);

@@ -243,6 +253,10 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
return defaultAllowPartialSearchResults;
}

private void setMaxOpenScrollContext(int maxOpenScrollContext) {
this.maxOpenScrollContext = maxOpenScrollContext;
}

private void setLowLevelCancellation(Boolean lowLevelCancellation) {
this.lowLevelCancellation = lowLevelCancellation;
}

@@ -592,11 +606,19 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
}

final SearchContext createAndPutContext(ShardSearchRequest request) throws IOException {
if (request.scroll() != null && openScrollContexts.get() >= maxOpenScrollContext) {
throw new ElasticsearchException(
"Trying to create too many scroll contexts. Must be less than or equal to: [" +
maxOpenScrollContext + "]. " + "This limit can be set by changing the ["
+ MAX_OPEN_SCROLL_CONTEXT.getKey() + "] setting.");
}

SearchContext context = createContext(request);
boolean success = false;
try {
putContext(context);
if (request.scroll() != null) {
openScrollContexts.incrementAndGet();
context.indexShard().getSearchOperationListener().onNewScrollContext(context);
}
context.indexShard().getSearchOperationListener().onNewContext(context);

@@ -696,6 +718,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
assert context.refCount() > 0 : " refCount must be > 0: " + context.refCount();
context.indexShard().getSearchOperationListener().onFreeContext(context);
if (context.scrollContext() != null) {
openScrollContexts.decrementAndGet();
context.indexShard().getSearchOperationListener().onFreeScrollContext(context);
}
return true;
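
The new search.max_open_scroll_context setting above is enforced with a plain counter: reject when the count has reached the limit, increment when a scroll context is registered, decrement when it is freed. A minimal sketch of that guard follows (illustrative only; as in the change above, the check and the increment are separate steps, so the limit is best effort under concurrency):

    // Minimal sketch of the bounded-counter guard described above.
    import java.util.concurrent.atomic.AtomicInteger;

    public final class ScrollContextLimiter {

        private final AtomicInteger open = new AtomicInteger();
        private volatile int maxOpen; // dynamically updatable, like the cluster setting

        ScrollContextLimiter(int maxOpen) {
            this.maxOpen = maxOpen;
        }

        void setMaxOpen(int maxOpen) {
            this.maxOpen = maxOpen;
        }

        /** Called before creating a scroll context; mirrors the check-then-increment above. */
        void onNewScrollContext() {
            if (open.get() >= maxOpen) {
                throw new IllegalStateException("Trying to create too many scroll contexts. " +
                    "Must be less than or equal to: [" + maxOpen + "].");
            }
            open.incrementAndGet();
        }

        /** Called when a scroll context is freed. */
        void onFreeScrollContext() {
            open.decrementAndGet();
        }

        public static void main(String[] args) {
            ScrollContextLimiter limiter = new ScrollContextLimiter(1);
            limiter.onNewScrollContext();        // ok
            try {
                limiter.onNewScrollContext();    // rejected: limit reached
            } catch (IllegalStateException e) {
                System.out.println(e.getMessage());
            }
            limiter.onFreeScrollContext();
            limiter.onNewScrollContext();        // ok again
        }
    }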

@@ -91,6 +91,7 @@ public class TransportMultiGetActionTests extends ESTestCase {
};

final Index index1 = new Index("index1", randomBase64UUID());
final Index index2 = new Index("index2", randomBase64UUID());
final ClusterState clusterState = ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName()))
.metaData(new MetaData.Builder()
.put(new IndexMetaData.Builder(index1.getName())

@@ -98,33 +99,45 @@ public class TransportMultiGetActionTests extends ESTestCase {
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 1)
.put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID()))
.putMapping("type1",
.putMapping("_doc",
XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
.startObject()
.startObject("type1")
.startObject("_doc")
.startObject("_routing")
.field("required", false)
.endObject()
.endObject()
.endObject()), true, XContentType.JSON))
.putMapping("type2",
.endObject()), true, XContentType.JSON)))
.put(new IndexMetaData.Builder(index2.getName())
.settings(Settings.builder().put("index.version.created", Version.CURRENT)
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 1)
.put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID()))
.putMapping("_doc",
XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
.startObject()
.startObject("type2")
.startObject("_doc")
.startObject("_routing")
.field("required", true)
.endObject()
.endObject()
.endObject()), true, XContentType.JSON)))).build();

final ShardIterator shardIterator = mock(ShardIterator.class);
when(shardIterator.shardId()).thenReturn(new ShardId(index1, randomInt()));
final ShardIterator index1ShardIterator = mock(ShardIterator.class);
when(index1ShardIterator.shardId()).thenReturn(new ShardId(index1, randomInt()));

final ShardIterator index2ShardIterator = mock(ShardIterator.class);
when(index2ShardIterator.shardId()).thenReturn(new ShardId(index2, randomInt()));

final OperationRouting operationRouting = mock(OperationRouting.class);
when(operationRouting.getShards(eq(clusterState), eq(index1.getName()), anyString(), anyString(), anyString()))
.thenReturn(shardIterator);
.thenReturn(index1ShardIterator);
when(operationRouting.shardId(eq(clusterState), eq(index1.getName()), anyString(), anyString()))
.thenReturn(new ShardId(index1, randomInt()));
when(operationRouting.getShards(eq(clusterState), eq(index2.getName()), anyString(), anyString(), anyString()))
.thenReturn(index2ShardIterator);
when(operationRouting.shardId(eq(clusterState), eq(index2.getName()), anyString(), anyString()))
.thenReturn(new ShardId(index2, randomInt()));

clusterService = mock(ClusterService.class);
when(clusterService.localNode()).thenReturn(transportService.getLocalNode());

@@ -153,8 +166,8 @@ public class TransportMultiGetActionTests extends ESTestCase {
final Task task = createTask();
final NodeClient client = new NodeClient(Settings.EMPTY, threadPool);
final MultiGetRequestBuilder request = new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE);
request.add(new MultiGetRequest.Item("index1", "type1", "1"));
request.add(new MultiGetRequest.Item("index1", "type1", "2"));
request.add(new MultiGetRequest.Item("index1", "_doc", "1"));
request.add(new MultiGetRequest.Item("index1", "_doc", "2"));

final AtomicBoolean shardActionInvoked = new AtomicBoolean(false);
transportAction = new TransportMultiGetAction(transportService, clusterService, shardAction,

@@ -178,8 +191,8 @@ public class TransportMultiGetActionTests extends ESTestCase {
final Task task = createTask();
final NodeClient client = new NodeClient(Settings.EMPTY, threadPool);
final MultiGetRequestBuilder request = new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE);
request.add(new MultiGetRequest.Item("index1", "type2", "1").routing("1"));
request.add(new MultiGetRequest.Item("index1", "type2", "2"));
request.add(new MultiGetRequest.Item("index2", "_doc", "1").routing("1"));
request.add(new MultiGetRequest.Item("index2", "_doc", "2"));

final AtomicBoolean shardActionInvoked = new AtomicBoolean(false);
transportAction = new TransportMultiGetAction(transportService, clusterService, shardAction,

@@ -193,7 +206,7 @@ public class TransportMultiGetActionTests extends ESTestCase {
assertNull(responses.get(0));
assertThat(responses.get(1).getFailure().getFailure(), instanceOf(RoutingMissingException.class));
assertThat(responses.get(1).getFailure().getFailure().getMessage(),
equalTo("routing is required for [index1]/[type2]/[2]"));
equalTo("routing is required for [index2]/[_doc]/[2]"));
}
};

@@ -506,7 +506,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
for (int id = 0; id < content.length; id++) {
Fields[] fields = new Fields[2];
for (int j = 0; j < indexNames.length; j++) {
TermVectorsResponse resp = client().prepareTermVector(indexNames[j], "type1", String.valueOf(id))
TermVectorsResponse resp = client().prepareTermVectors(indexNames[j], "type1", String.valueOf(id))
.setOffsets(true)
.setPositions(true)
.setSelectedFields("field1")

@@ -1069,7 +1069,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
for (int id = 0; id < content.length; id++) {
Fields[] fields = new Fields[2];
for (int j = 0; j < indexNames.length; j++) {
TermVectorsResponse resp = client().prepareTermVector(indexNames[j], "type1", String.valueOf(id))
TermVectorsResponse resp = client().prepareTermVectors(indexNames[j], "type1", String.valueOf(id))
.setOffsets(true)
.setPositions(true)
.setSelectedFields("field1", "field2")

@@ -92,40 +92,53 @@ public class TransportMultiTermVectorsActionTests extends ESTestCase {
};

final Index index1 = new Index("index1", randomBase64UUID());
final Index index2 = new Index("index2", randomBase64UUID());
final ClusterState clusterState = ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName()))
.metaData(new MetaData.Builder()
.put(new IndexMetaData.Builder(index1.getName())
.settings(Settings.builder().put("index.version.created", Version.CURRENT)
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 1)
.put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID()))
.putMapping("type1",
XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
.startObject()
.startObject("type1")
.startObject("_routing")
.field("required", false)
.settings(Settings.builder().put("index.version.created", Version.CURRENT)
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 1)
.put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID()))
.putMapping("_doc",
XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
.startObject()
.startObject("_doc")
.startObject("_routing")
.field("required", false)
.endObject()
.endObject()
.endObject()
.endObject()), true, XContentType.JSON))
.putMapping("type2",
XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
.startObject()
.startObject("type2")
.startObject("_routing")
.field("required", true)
.endObject()), true, XContentType.JSON)))
.put(new IndexMetaData.Builder(index2.getName())
.settings(Settings.builder().put("index.version.created", Version.CURRENT)
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 1)
.put(IndexMetaData.SETTING_INDEX_UUID, index1.getUUID()))
.putMapping("_doc",
XContentHelper.convertToJson(BytesReference.bytes(XContentFactory.jsonBuilder()
.startObject()
.startObject("_doc")
.startObject("_routing")
.field("required", true)
.endObject()
.endObject()
.endObject()
.endObject()), true, XContentType.JSON)))).build();
.endObject()), true, XContentType.JSON)))).build();

final ShardIterator shardIterator = mock(ShardIterator.class);
when(shardIterator.shardId()).thenReturn(new ShardId(index1, randomInt()));
final ShardIterator index1ShardIterator = mock(ShardIterator.class);
when(index1ShardIterator.shardId()).thenReturn(new ShardId(index1, randomInt()));

final ShardIterator index2ShardIterator = mock(ShardIterator.class);
when(index2ShardIterator.shardId()).thenReturn(new ShardId(index2, randomInt()));

final OperationRouting operationRouting = mock(OperationRouting.class);
when(operationRouting.getShards(eq(clusterState), eq(index1.getName()), anyString(), anyString(), anyString()))
.thenReturn(shardIterator);
.thenReturn(index1ShardIterator);
when(operationRouting.shardId(eq(clusterState), eq(index1.getName()), anyString(), anyString()))
.thenReturn(new ShardId(index1, randomInt()));
when(operationRouting.getShards(eq(clusterState), eq(index2.getName()), anyString(), anyString(), anyString()))
.thenReturn(index2ShardIterator);
when(operationRouting.shardId(eq(clusterState), eq(index2.getName()), anyString(), anyString()))
.thenReturn(new ShardId(index2, randomInt()));

clusterService = mock(ClusterService.class);
when(clusterService.localNode()).thenReturn(transportService.getLocalNode());

@@ -155,8 +168,8 @@ public class TransportMultiTermVectorsActionTests extends ESTestCase {
final Task task = createTask();
final NodeClient client = new NodeClient(Settings.EMPTY, threadPool);
final MultiTermVectorsRequestBuilder request = new MultiTermVectorsRequestBuilder(client, MultiTermVectorsAction.INSTANCE);
request.add(new TermVectorsRequest("index1", "type1", "1"));
request.add(new TermVectorsRequest("index1", "type1", "2"));
request.add(new TermVectorsRequest("index1", "_doc", "1"));
request.add(new TermVectorsRequest("index2", "_doc", "2"));

final AtomicBoolean shardActionInvoked = new AtomicBoolean(false);
transportAction = new TransportMultiTermVectorsAction(transportService, clusterService, shardAction,

@@ -180,8 +193,8 @@ public class TransportMultiTermVectorsActionTests extends ESTestCase {
final Task task = createTask();
final NodeClient client = new NodeClient(Settings.EMPTY, threadPool);
final MultiTermVectorsRequestBuilder request = new MultiTermVectorsRequestBuilder(client, MultiTermVectorsAction.INSTANCE);
request.add(new TermVectorsRequest("index1", "type2", "1").routing("1"));
request.add(new TermVectorsRequest("index1", "type2", "2"));
request.add(new TermVectorsRequest("index2", "_doc", "1").routing("1"));
request.add(new TermVectorsRequest("index2", "_doc", "2"));

final AtomicBoolean shardActionInvoked = new AtomicBoolean(false);
transportAction = new TransportMultiTermVectorsAction(transportService, clusterService, shardAction,

@@ -278,7 +278,7 @@ public class AckIT extends ESIntegTestCase {
assertAcked(client().admin().indices().preparePutMapping("test").setType("test").setSource("field", "type=keyword"));

for (Client client : clients()) {
assertThat(getLocalClusterState(client).metaData().indices().get("test").mapping("test"), notNullValue());
assertThat(getLocalClusterState(client).metaData().indices().get("test").getMappings().get("test"), notNullValue());
}
}

@@ -61,7 +61,7 @@ public class MetaDataMappingServiceTests extends ESSingleNodeTestCase {
// the task really was a mapping update
assertThat(
indexService.mapperService().documentMapper("type").mappingSource(),
not(equalTo(result.resultingState.metaData().index("test").mapping("type").source())));
not(equalTo(result.resultingState.metaData().index("test").getMappings().get("type").source())));
// since we never committed the cluster state update, the in-memory state is unchanged
assertThat(indexService.mapperService().documentMapper("type").mappingSource(), equalTo(currentMapping));
}

@@ -26,16 +26,19 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.elasticsearch.Version;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.hamcrest.MatcherAssert;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.loadDocIdAndVersion;

@@ -193,4 +196,25 @@ public class VersionsTests extends ESTestCase {
assertEquals(size, VersionsAndSeqNoResolver.lookupStates.size());
dir.close();
}

public void testLuceneVersionOnUnknownVersions() {
List<Version> allVersions = VersionUtils.allVersions();

// should have the same Lucene version as the latest 6.x version
Version version = Version.fromString("6.88.50");
assertEquals(allVersions.get(Collections.binarySearch(allVersions, Version.V_7_0_0) - 1).luceneVersion,
version.luceneVersion);

// between two known versions, should use the lucene version of the previous version
version = Version.fromString("6.2.50");
assertEquals(VersionUtils.getPreviousVersion(Version.V_6_2_4).luceneVersion, version.luceneVersion);

// too old version, major should be the oldest supported lucene version minus 1
version = Version.fromString("5.2.1");
assertEquals(Version.V_6_0_0.luceneVersion.major - 1, version.luceneVersion.major);

// future version, should be the same version as today
version = Version.fromString("7.77.1");
assertEquals(Version.CURRENT.luceneVersion, version.luceneVersion);
}
}
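
testLuceneVersionOnUnknownVersions above pins down the fallback rule for unknown release ids: use the Lucene version of the closest known release that is not newer, and the current Lucene version for anything from the future. A simplified sketch of that closest-known lookup over a sorted list follows (illustrative only; the real resolution is inside the Version class):

    // Simplified sketch of "closest known version that is not newer", as exercised by the test above.
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public final class ClosestVersionSketch {

        /** Returns the largest known id that is <= the requested id, or the smallest known id if none is. */
        static int closestKnown(List<Integer> sortedKnownIds, int requestedId) {
            int idx = Collections.binarySearch(sortedKnownIds, requestedId);
            if (idx >= 0) {
                return sortedKnownIds.get(idx);      // exact match
            }
            int insertionPoint = -idx - 1;           // index of first element greater than requestedId
            return sortedKnownIds.get(Math.max(0, insertionPoint - 1));
        }

        public static void main(String[] args) {
            List<Integer> known = Arrays.asList(6_02_04, 6_05_00, 7_00_00); // toy version ids
            System.out.println(closestKnown(known, 6_02_50)); // -> 60204 (previous known 6.x)
            System.out.println(closestKnown(known, 6_88_50)); // -> 60500 (latest 6.x below 7.0.0)
            System.out.println(closestKnown(known, 7_77_01)); // -> 70000 (newest known)
        }
    }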

@@ -86,7 +86,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {

logger.info("--> verify meta _routing required exists");
MappingMetaData mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData()
.index("test").mapping("type1");
.index("test").getMappings().get("type1");
assertThat(mappingMd.routing().required(), equalTo(true));

logger.info("--> restarting nodes...");

@@ -96,7 +96,8 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
ensureYellow();

logger.info("--> verify meta _routing required exists");
mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").mapping("type1");
mappingMd = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test").getMappings()
.get("type1");
assertThat(mappingMd.routing().required(), equalTo(true));
}

@@ -67,7 +67,6 @@ import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.index.IndexRequest;

@@ -127,6 +126,7 @@ import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.IndexSettingsModule;
import org.elasticsearch.test.VersionUtils;
import org.hamcrest.MatcherAssert;
import org.hamcrest.Matchers;

@@ -2605,7 +2605,7 @@ public class InternalEngineTests extends EngineTestCase {

// create
{
store.createEmpty();
store.createEmpty(Version.CURRENT.luceneVersion);
final String translogUUID =
Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());

@@ -2769,7 +2769,7 @@ public class InternalEngineTests extends EngineTestCase {
final Path translogPath = createTempDir();
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
final LongSupplier globalCheckpointSupplier = () -> globalCheckpoint.get();
store.createEmpty();
store.createEmpty(Version.CURRENT.luceneVersion);
final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
try (InternalEngine engine =

@@ -4585,7 +4585,7 @@ public class InternalEngineTests extends EngineTestCase {
final Path translogPath = createTempDir();
store = createStore();
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
store.createEmpty();
store.createEmpty(Version.CURRENT.luceneVersion);
final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);

@@ -5454,6 +5454,34 @@ public class InternalEngineTests extends EngineTestCase {
}
}

public void testOpenSoftDeletesIndexWithSoftDeletesDisabled() throws Exception {
try (Store store = createStore()) {
Path translogPath = createTempDir();
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
final IndexSettings softDeletesEnabled = IndexSettingsModule.newIndexSettings(
IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(Settings.builder().
put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)).build());
final List<DocIdSeqNoAndTerm> docs;
try (InternalEngine engine = createEngine(
config(softDeletesEnabled, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get))) {
List<Engine.Operation> ops = generateReplicaHistory(between(1, 100), randomBoolean());
applyOperations(engine, ops);
globalCheckpoint.set(randomLongBetween(globalCheckpoint.get(), engine.getLocalCheckpoint()));
engine.syncTranslog();
engine.flush();
docs = getDocIds(engine, true);
}
final IndexSettings softDeletesDisabled = IndexSettingsModule.newIndexSettings(
IndexMetaData.builder(defaultSettings.getIndexMetaData()).settings(Settings.builder()
.put(defaultSettings.getSettings()).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false)).build());
EngineConfig config = config(softDeletesDisabled, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get);
trimUnsafeCommits(config);
try (InternalEngine engine = createEngine(config)) {
assertThat(getDocIds(engine, true), equalTo(docs));
}
}
}

static void trimUnsafeCommits(EngineConfig config) throws IOException {
final Store store = config.getStore();
final TranslogConfig translogConfig = config.getTranslogConfig();

@@ -5472,4 +5500,25 @@ public class InternalEngineTests extends EngineTestCase {
assertThat(message, engine.getNumDocUpdates(), equalTo(expectedUpdates));
assertThat(message, engine.getNumDocDeletes(), equalTo(expectedDeletes));
}

public void testStoreHonorsLuceneVersion() throws IOException {
for (Version createdVersion : Arrays.asList(
Version.CURRENT, VersionUtils.getPreviousMinorVersion(), VersionUtils.getFirstVersion())) {
Settings settings = Settings.builder()
.put(indexSettings())
.put(IndexMetaData.SETTING_VERSION_CREATED, createdVersion).build();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings);
try (Store store = createStore();
InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null))) {
ParsedDocument doc = testParsedDocument("1", null, new Document(),
new BytesArray("{}".getBytes("UTF-8")), null);
engine.index(appendOnlyPrimary(doc, false, 1));
engine.refresh("test");
try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
LeafReader leafReader = getOnlyLeafReader(searcher.reader());
assertEquals(createdVersion.luceneVersion.major, leafReader.getMetaData().getCreatedVersionMajor());
}
}
}
}
}
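
testStoreHonorsLuceneVersion above asserts that segments report the Lucene major of the version the index was created with, not the version doing the writing. The same check against plain Lucene, as a standalone sketch (assumes Lucene 7.x or later; it is not the engine test itself):

    // Standalone sketch: the created-version major recorded at index creation is what readers report later.
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.util.Version;

    import java.nio.file.Files;

    public final class CreatedVersionSketch {
        public static void main(String[] args) throws Exception {
            int pinnedMajor = Version.LATEST.major - 1; // pretend the index came from the previous major
            try (Directory dir = FSDirectory.open(Files.createTempDirectory("created-version"))) {
                IndexWriterConfig iwc = new IndexWriterConfig()
                    .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
                    .setIndexCreatedVersionMajor(pinnedMajor);
                try (IndexWriter writer = new IndexWriter(dir, iwc)) {
                    Document doc = new Document();
                    doc.add(new StringField("id", "1", Field.Store.NO));
                    writer.addDocument(doc);
                    writer.commit();
                }
                try (DirectoryReader reader = DirectoryReader.open(dir)) {
                    LeafReader leaf = reader.leaves().get(0).reader();
                    // same assertion as the engine test above, just against plain Lucene
                    System.out.println(leaf.getMetaData().getCreatedVersionMajor() == pinnedMajor); // true
                }
            }
        }
    }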

@@ -19,6 +19,7 @@

package org.elasticsearch.index.engine;

import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.index.mapper.ParsedDocument;

@@ -143,7 +144,7 @@ public class ReadOnlyEngineTests extends EngineTestCase {
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
try (Store store = createStore()) {
EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get);
store.createEmpty();
store.createEmpty(Version.CURRENT.luceneVersion);
try (ReadOnlyEngine readOnlyEngine = new ReadOnlyEngine(config, null , null, true, Function.identity())) {
Class<? extends Throwable> expectedException = LuceneTestCase.TEST_ASSERTS_ENABLED ? AssertionError.class :
UnsupportedOperationException.class;

@@ -1550,4 +1550,21 @@ public class DocumentParserTests extends ESSingleNodeTestCase {
assertEquals("Could not dynamically add mapping for field [alias-field.dynamic-field]. "
+ "Existing mapping for [alias-field] must be of type object but found [alias].", exception.getMessage());
}

public void testTypeless() throws IOException {
DocumentMapperParser mapperParser = createIndex("test").mapperService().documentMapperParser();
String mapping = Strings.toString(XContentFactory.jsonBuilder()
.startObject().startObject("type").startObject("properties")
.startObject("foo").field("type", "keyword").endObject()
.endObject().endObject().endObject());
DocumentMapper mapper = mapperParser.parse("type", new CompressedXContent(mapping));

BytesReference bytes = BytesReference.bytes(XContentFactory.jsonBuilder()
.startObject()
.field("foo", "1234")
.endObject());

ParsedDocument doc = mapper.parse(SourceToParse.source("test", "_doc", "1", bytes, XContentType.JSON));
assertNull(doc.dynamicMappingsUpdate()); // no update since we reused the existing type
}
}
@ -67,6 +67,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.settings.IndexScopedSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
@ -80,8 +81,10 @@ import org.elasticsearch.common.xcontent.XContentType;
|
|||
import org.elasticsearch.core.internal.io.IOUtils;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.engine.CommitStats;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.engine.Engine.DeleteResult;
|
||||
import org.elasticsearch.index.engine.EngineException;
|
||||
import org.elasticsearch.index.engine.EngineTestCase;
|
||||
import org.elasticsearch.index.engine.InternalEngine;
|
||||
|
@ -733,7 +736,6 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
return fut.get();
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35850")
|
||||
public void testOperationPermitOnReplicaShards() throws Exception {
|
||||
final ShardId shardId = new ShardId("test", "_na_", 0);
|
||||
final IndexShard indexShard;
|
||||
|
@ -1024,7 +1026,6 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
closeShards(replicaShard, primaryShard);
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35850")
|
||||
public void testRestoreLocalHistoryFromTranslogOnPromotion() throws IOException, InterruptedException {
|
||||
final IndexShard indexShard = newStartedShard(false);
|
||||
final int operations = 1024 - scaledRandomIntBetween(0, 1024);
|
||||
|
@ -1089,7 +1090,6 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
closeShard(indexShard, false);
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35850")
|
||||
public void testRollbackReplicaEngineOnPromotion() throws IOException, InterruptedException {
|
||||
final IndexShard indexShard = newStartedShard(false);
|
||||
|
||||
|
@ -1433,7 +1433,7 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
}
|
||||
long refreshCount = shard.refreshStats().getTotal();
|
||||
indexDoc(shard, "_doc", "test");
|
||||
try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", "test",
|
||||
try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "_doc", "test",
|
||||
new Term(IdFieldMapper.NAME, Uid.encodeId("test"))))) {
|
||||
assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+1));
|
||||
}
|
||||
|
@ -2133,7 +2133,7 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
shard.refresh("test");
|
||||
|
||||
try (Engine.GetResult getResult = shard
|
||||
.get(new Engine.Get(false, false, "test", "1",
|
||||
.get(new Engine.Get(false, false, "_doc", "1",
|
||||
new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
|
||||
assertTrue(getResult.exists());
|
||||
assertNotNull(getResult.searcher());
|
||||
|
@ -2175,7 +2175,7 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
assertEquals(search.totalHits.value, 1);
|
||||
}
|
||||
try (Engine.GetResult getResult = newShard
|
||||
.get(new Engine.Get(false, false, "test", "1",
|
||||
.get(new Engine.Get(false, false, "_doc", "1",
|
||||
new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
|
||||
assertTrue(getResult.exists());
|
||||
assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader
|
||||
|
@@ -3600,11 +3600,125 @@ public class IndexShardTests extends IndexShardTestCase {
    closeShard(shard, false);
}

public void testConcurrentAcquireAllReplicaOperationsPermitsWithPrimaryTermUpdate() throws Exception {
    final IndexShard replica = newStartedShard(false);
    indexOnReplicaWithGaps(replica, between(0, 1000), Math.toIntExact(replica.getLocalCheckpoint()));

    final int nbTermUpdates = randomIntBetween(1, 5);

    for (int i = 0; i < nbTermUpdates; i++) {
        long opPrimaryTerm = replica.getOperationPrimaryTerm() + 1;
        final long globalCheckpoint = replica.getGlobalCheckpoint();
        final long maxSeqNoOfUpdatesOrDeletes = replica.getMaxSeqNoOfUpdatesOrDeletes();

        final int operations = scaledRandomIntBetween(5, 32);
        final CyclicBarrier barrier = new CyclicBarrier(1 + operations);
        final CountDownLatch latch = new CountDownLatch(operations);

        final Thread[] threads = new Thread[operations];
        for (int j = 0; j < operations; j++) {
            threads[j] = new Thread(() -> {
                try {
                    barrier.await();
                } catch (final BrokenBarrierException | InterruptedException e) {
                    throw new RuntimeException(e);
                }
                replica.acquireAllReplicaOperationsPermits(
                    opPrimaryTerm,
                    globalCheckpoint,
                    maxSeqNoOfUpdatesOrDeletes,
                    new ActionListener<Releasable>() {
                        @Override
                        public void onResponse(final Releasable releasable) {
                            try (Releasable ignored = releasable) {
                                assertThat(replica.getPendingPrimaryTerm(), greaterThanOrEqualTo(opPrimaryTerm));
                                assertThat(replica.getOperationPrimaryTerm(), equalTo(opPrimaryTerm));
                            } finally {
                                latch.countDown();
                            }
                        }

                        @Override
                        public void onFailure(final Exception e) {
                            try {
                                throw new RuntimeException(e);
                            } finally {
                                latch.countDown();
                            }
                        }
                    }, TimeValue.timeValueMinutes(30L));
            });
            threads[j].start();
        }
        barrier.await();
        latch.await();

        for (Thread thread : threads) {
            thread.join();
        }
    }

    closeShard(replica, false);
}
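Note: the test above coordinates its worker threads with a CyclicBarrier (so every permit acquisition starts at the same instant) and a CountDownLatch (so the test waits for every listener to complete before joining the threads). A minimal, self-contained sketch of that coordination pattern, using only JDK types and a placeholder action instead of the shard call:

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;

public class BarrierLatchSketch {
    public static void main(String[] args) throws Exception {
        final int workers = 8;
        // +1 party so the main thread releases all workers at once.
        final CyclicBarrier barrier = new CyclicBarrier(1 + workers);
        final CountDownLatch done = new CountDownLatch(workers);

        Thread[] threads = new Thread[workers];
        for (int i = 0; i < workers; i++) {
            final int id = i;
            threads[i] = new Thread(() -> {
                try {
                    barrier.await();              // wait until every worker is ready
                    // ... perform the concurrent action under test here ...
                    System.out.println("worker " + id + " ran");
                } catch (InterruptedException | BrokenBarrierException e) {
                    throw new RuntimeException(e);
                } finally {
                    done.countDown();             // signal completion, success or failure
                }
            });
            threads[i].start();
        }

        barrier.await();   // release all workers simultaneously
        done.await();      // wait for every worker's callback to finish
        for (Thread t : threads) {
            t.join();
        }
    }
}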

@Override
public Settings threadPoolSettings() {
    return Settings.builder().put(super.threadPoolSettings()).put("thread_pool.estimated_time_interval", "5ms").build();
}

public void testTypelessDelete() throws IOException {
    Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .build();
    IndexMetaData metaData = IndexMetaData.builder("index")
        .putMapping("some_type", "{ \"properties\": {}}")
        .settings(settings)
        .build();
    IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
    recoverShardFromStore(shard);
    Engine.IndexResult indexResult = indexDoc(shard, "some_type", "id", "{}");
    assertTrue(indexResult.isCreated());

    DeleteResult deleteResult = shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, "some_other_type", "id", VersionType.INTERNAL);
    assertFalse(deleteResult.isFound());

    deleteResult = shard.applyDeleteOperationOnPrimary(Versions.MATCH_ANY, "_doc", "id", VersionType.INTERNAL);
    assertTrue(deleteResult.isFound());

    closeShards(shard);
}

public void testTypelessGet() throws IOException {
    Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .build();
    IndexMetaData metaData = IndexMetaData.builder("index")
        .putMapping("some_type", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
        .settings(settings)
        .primaryTerm(0, 1).build();
    IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
    recoverShardFromStore(shard);
    Engine.IndexResult indexResult = indexDoc(shard, "some_type", "0", "{\"foo\" : \"bar\"}");
    assertTrue(indexResult.isCreated());

    org.elasticsearch.index.engine.Engine.GetResult getResult = shard.get(
        new Engine.Get(true, true, "some_type", "0", new Term("_id", Uid.encodeId("0"))));
    assertTrue(getResult.exists());
    getResult.close();

    getResult = shard.get(new Engine.Get(true, true, "some_other_type", "0", new Term("_id", Uid.encodeId("0"))));
    assertFalse(getResult.exists());
    getResult.close();

    getResult = shard.get(new Engine.Get(true, true, "_doc", "0", new Term("_id", Uid.encodeId("0"))));
    assertTrue(getResult.exists());
    getResult.close();

    closeShards(shard);
}
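Note: testTypelessDelete and testTypelessGet both exercise the same resolution rule: a request that uses the typeless name "_doc" matches the index's single concrete mapping type, while any other mismatched type name does not. A hypothetical helper capturing just that rule (the method and names are illustrative, not the actual IndexShard implementation):

public class TypelessMatchSketch {
    // Hypothetical helper, not the real resolution code: "_doc" acts as a
    // wildcard for the index's single concrete mapping type.
    static boolean typeMatches(String requestType, String mappingType) {
        return "_doc".equals(requestType) || mappingType.equals(requestType);
    }

    public static void main(String[] args) {
        System.out.println(typeMatches("_doc", "some_type"));            // true  -> found / deleted
        System.out.println(typeMatches("some_other_type", "some_type")); // false -> not found
        System.out.println(typeMatches("some_type", "some_type"));       // true  -> found
    }
}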

/**
 * Randomizes the usage of {@link IndexShard#acquireReplicaOperationPermit(long, long, long, ActionListener, String, Object)} and
 * {@link IndexShard#acquireAllReplicaOperationsPermits(long, long, long, ActionListener, TimeValue)} in order to acquire a permit.
@ -26,6 +26,7 @@ import org.apache.lucene.index.IndexWriterConfig;
|
|||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
|
@ -114,7 +115,7 @@ public class RefreshListenersTests extends ESTestCase {
|
|||
// we don't need to notify anybody in this test
|
||||
}
|
||||
};
|
||||
store.createEmpty();
|
||||
store.createEmpty(Version.CURRENT.luceneVersion);
|
||||
final long primaryTerm = randomNonNegativeLong();
|
||||
final String translogUUID =
|
||||
Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm);
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.apache.lucene.store.FSDirectory;
|
|||
import org.apache.lucene.store.Lock;
|
||||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
import org.apache.lucene.store.NativeFSLockFactory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanation;
|
||||
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
|
||||
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
|
||||
|
@ -99,6 +100,7 @@ import static org.hamcrest.Matchers.hasSize;
|
|||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.startsWith;
|
||||
|
||||
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36189")
|
||||
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 0)
|
||||
public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.elasticsearch.index.shard;
|
|||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
|
@ -77,4 +78,30 @@ public class ShardGetServiceTests extends IndexShardTestCase {
|
|||
|
||||
closeShards(primary);
|
||||
}
|
||||
|
||||
public void testTypelessGetForUpdate() throws IOException {
|
||||
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.build();
|
||||
IndexMetaData metaData = IndexMetaData.builder("index")
|
||||
.putMapping("some_type", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
|
||||
.settings(settings)
|
||||
.primaryTerm(0, 1).build();
|
||||
IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
|
||||
recoverShardFromStore(shard);
|
||||
Engine.IndexResult indexResult = indexDoc(shard, "some_type", "0", "{\"foo\" : \"bar\"}");
|
||||
assertTrue(indexResult.isCreated());
|
||||
|
||||
GetResult getResult = shard.getService().getForUpdate("some_type", "0", Versions.MATCH_ANY, VersionType.INTERNAL);
|
||||
assertTrue(getResult.isExists());
|
||||
|
||||
getResult = shard.getService().getForUpdate("some_other_type", "0", Versions.MATCH_ANY, VersionType.INTERNAL);
|
||||
assertFalse(getResult.isExists());
|
||||
|
||||
getResult = shard.getService().getForUpdate("_doc", "0", Versions.MATCH_ANY, VersionType.INTERNAL);
|
||||
assertTrue(getResult.isExists());
|
||||
|
||||
closeShards(shard);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1035,7 +1035,7 @@ public class StoreTests extends ESTestCase {
|
|||
final ShardId shardId = new ShardId("index", "_na_", 1);
|
||||
try (Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId))) {
|
||||
|
||||
store.createEmpty();
|
||||
store.createEmpty(Version.LATEST);
|
||||
|
||||
// remove the history uuid
|
||||
IndexWriterConfig iwc = new IndexWriterConfig(null)
|
||||
|
@ -1067,7 +1067,7 @@ public class StoreTests extends ESTestCase {
|
|||
final ShardId shardId = new ShardId("index", "_na_", 1);
|
||||
try (Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId))) {
|
||||
|
||||
store.createEmpty();
|
||||
store.createEmpty(Version.LATEST);
|
||||
|
||||
SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory());
|
||||
assertThat(segmentInfos.getUserData(), hasKey(Engine.HISTORY_UUID_KEY));
|
||||
|
|
|
@ -1,67 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.rest.action.document;
|
||||
|
||||
import org.elasticsearch.client.node.NodeClient;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.rest.RestChannel;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.RestRequest.Method;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.rest.FakeRestChannel;
|
||||
import org.elasticsearch.test.rest.FakeRestRequest;
|
||||
import org.elasticsearch.usage.UsageService;
|
||||
|
||||
import java.util.Collections;
|
||||
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
public class RestTermVectorsActionTests extends ESTestCase {
|
||||
private RestController controller;
|
||||
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
controller = new RestController(Collections.emptySet(), null,
|
||||
mock(NodeClient.class),
|
||||
new NoneCircuitBreakerService(),
|
||||
new UsageService());
|
||||
new RestTermVectorsAction(Settings.EMPTY, controller);
|
||||
}
|
||||
|
||||
public void testDeprecatedEndpoint() {
|
||||
RestRequest request = new FakeRestRequest.Builder(xContentRegistry())
|
||||
.withMethod(Method.POST)
|
||||
.withPath("/some_index/some_type/some_id/_termvector")
|
||||
.build();
|
||||
|
||||
performRequest(request);
|
||||
assertWarnings("[POST /{index}/{type}/{id}/_termvector] is deprecated! Use" +
|
||||
" [POST /{index}/{type}/{id}/_termvectors] instead.");
|
||||
}
|
||||
|
||||
private void performRequest(RestRequest request) {
|
||||
RestChannel channel = new FakeRestChannel(request, false, 1);
|
||||
ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
|
||||
controller.dispatchRequest(request, channel, threadContext);
|
||||
}
|
||||
}
|
|
@ -21,12 +21,14 @@ package org.elasticsearch.search;
|
|||
import com.carrotsearch.hppc.IntArrayList;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.search.SearchTask;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.action.search.ClearScrollRequest;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
|
@ -76,6 +78,7 @@ import java.io.IOException;
|
|||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.LinkedList;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
@ -417,6 +420,44 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* test that creating more than the allowed number of scroll contexts throws an exception
|
||||
*/
|
||||
public void testMaxOpenScrollContexts() throws RuntimeException {
|
||||
createIndex("index");
|
||||
client().prepareIndex("index", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
|
||||
|
||||
final SearchService service = getInstanceFromNode(SearchService.class);
|
||||
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
|
||||
final IndexShard indexShard = indexService.getShard(0);
|
||||
|
||||
// Open all possible scrolls, clear some of them, then open more until the limit is reached
|
||||
LinkedList<String> clearScrollIds = new LinkedList<>();
|
||||
|
||||
for (int i = 0; i < SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY); i++) {
|
||||
SearchResponse searchResponse = client().prepareSearch("index").setSize(1).setScroll("1m").get();
|
||||
|
||||
if (randomInt(4) == 0) clearScrollIds.addLast(searchResponse.getScrollId());
|
||||
}
|
||||
|
||||
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
|
||||
clearScrollRequest.setScrollIds(clearScrollIds);
|
||||
client().clearScroll(clearScrollRequest);
|
||||
|
||||
for (int i = 0; i < clearScrollIds.size(); i++) {
|
||||
client().prepareSearch("index").setSize(1).setScroll("1m").get();
|
||||
}
|
||||
|
||||
ElasticsearchException ex = expectThrows(ElasticsearchException.class,
|
||||
() -> service.createAndPutContext(new ShardScrollRequestTest(indexShard.shardId())));
|
||||
assertEquals(
|
||||
"Trying to create too many scroll contexts. Must be less than or equal to: [" +
|
||||
SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY) + "]. " +
|
||||
"This limit can be set by changing the [search.max_open_scroll_context] setting.",
|
||||
ex.getMessage());
|
||||
}
|
||||
|
||||
public static class FailOnRewriteQueryPlugin extends Plugin implements SearchPlugin {
|
||||
@Override
|
||||
public List<QuerySpec<?>> getQueries() {
|
||||
|
@ -472,6 +513,22 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public static class ShardScrollRequestTest extends ShardSearchLocalRequest {
|
||||
private Scroll scroll;
|
||||
|
||||
ShardScrollRequestTest(ShardId shardId) {
|
||||
super(shardId, 1, SearchType.DEFAULT, new SearchSourceBuilder(),
|
||||
new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, true, null, null);
|
||||
|
||||
this.scroll = new Scroll(TimeValue.timeValueMinutes(1));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Scroll scroll() {
|
||||
return this.scroll;
|
||||
}
|
||||
}
|
||||
|
||||
public void testCanMatch() throws IOException {
|
||||
createIndex("index");
|
||||
final SearchService service = getInstanceFromNode(SearchService.class);
|
||||
|
|
|
@ -97,6 +97,16 @@ public class RepositoriesIT extends AbstractSnapshotIntegTestCase {
|
|||
assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue());
|
||||
assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
|
||||
|
||||
logger.info("--> check that trying to create a repository with the same settings repeatedly does not update cluster state");
|
||||
String beforeStateUuid = clusterStateResponse.getState().stateUUID();
|
||||
assertThat(
|
||||
client.admin().cluster().preparePutRepository("test-repo-1")
|
||||
.setType("fs").setSettings(Settings.builder()
|
||||
.put("location", location)
|
||||
).get().isAcknowledged(),
|
||||
equalTo(true));
|
||||
assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID());
|
||||
|
||||
logger.info("--> delete repository test-repo-1");
|
||||
client.admin().cluster().prepareDeleteRepository("test-repo-1").get();
|
||||
repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
|
||||
|
|
|
@ -64,7 +64,7 @@ thirdPartyAudit.excludes = [
|
|||
|
||||
task namingConventionsMain(type: org.elasticsearch.gradle.precommit.NamingConventionsTask) {
|
||||
checkForTestsInMain = true
|
||||
javaHome = project.runtimeJavaHome
|
||||
javaHome = project.compilerJavaHome
|
||||
}
|
||||
precommit.dependsOn namingConventionsMain
|
||||
|
||||
|
|
|
@ -51,6 +51,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
|
|||
import org.elasticsearch.cluster.routing.AllocationId;
|
||||
import org.elasticsearch.common.CheckedFunction;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Randomness;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
@ -499,7 +500,7 @@ public abstract class EngineTestCase extends ESTestCase {
|
|||
final Store store = config.getStore();
|
||||
final Directory directory = store.directory();
|
||||
if (Lucene.indexExists(directory) == false) {
|
||||
store.createEmpty();
|
||||
store.createEmpty(config.getIndexSettings().getIndexVersionCreated().luceneVersion);
|
||||
final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
|
||||
SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
|
||||
store.associateIndexWithNewTranslog(translogUuid);
|
||||
|
@@ -704,6 +705,32 @@ public abstract class EngineTestCase extends ESTestCase {
    return ops;
}

public List<Engine.Operation> generateReplicaHistory(int numOps, boolean allowGapInSeqNo) {
    long seqNo = 0;
    List<Engine.Operation> operations = new ArrayList<>(numOps);
    for (int i = 0; i < numOps; i++) {
        String id = Integer.toString(between(1, 100));
        final ParsedDocument doc = EngineTestCase.createParsedDoc(id, null);
        if (randomBoolean()) {
            operations.add(new Engine.Index(EngineTestCase.newUid(doc), doc, seqNo, primaryTerm.get(),
                i, null, Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis(),
                -1, true));
        } else if (randomBoolean()) {
            operations.add(new Engine.Delete(doc.type(), doc.id(), EngineTestCase.newUid(doc), seqNo, primaryTerm.get(),
                i, null, Engine.Operation.Origin.REPLICA, threadPool.relativeTimeInMillis()));
        } else {
            operations.add(new Engine.NoOp(seqNo, primaryTerm.get(), Engine.Operation.Origin.REPLICA,
                threadPool.relativeTimeInMillis(), "test-" + i));
        }
        seqNo++;
        if (allowGapInSeqNo && rarely()) {
            seqNo++;
        }
    }
    Randomness.shuffle(operations);
    return operations;
}
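Note: generateReplicaHistory assigns monotonically increasing sequence numbers, occasionally skips one when allowGapInSeqNo is set, and finally shuffles the list so operations are replayed out of order, as a real replica might see them. The same generation idea in isolation, assuming a trivial stand-in Op class instead of Engine.Operation:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class ReplicaHistorySketch {

    // Illustrative stand-in for an engine operation: just a doc id and a sequence number.
    static final class Op {
        final String id;
        final long seqNo;

        Op(String id, long seqNo) {
            this.id = id;
            this.seqNo = seqNo;
        }

        @Override
        public String toString() {
            return id + "@" + seqNo;
        }
    }

    static List<Op> generateHistory(int numOps, boolean allowGapInSeqNo, Random random) {
        List<Op> operations = new ArrayList<>(numOps);
        long seqNo = 0;
        for (int i = 0; i < numOps; i++) {
            operations.add(new Op("doc-" + (1 + random.nextInt(100)), seqNo));
            seqNo++;
            if (allowGapInSeqNo && random.nextInt(10) == 0) {
                seqNo++; // leave a hole in the sequence, as a replica may legitimately observe
            }
        }
        Collections.shuffle(operations, random); // operations may arrive out of order on a replica
        return operations;
    }

    public static void main(String[] args) {
        generateHistory(20, true, new Random(42)).forEach(System.out::println);
    }
}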

public static void assertOpsOnReplica(
        final List<Engine.Operation> ops,
        final InternalEngine replicaEngine,
@ -788,14 +815,7 @@ public abstract class EngineTestCase extends ESTestCase {
|
|||
int docOffset;
|
||||
while ((docOffset = offset.incrementAndGet()) < ops.size()) {
|
||||
try {
|
||||
final Engine.Operation op = ops.get(docOffset);
|
||||
if (op instanceof Engine.Index) {
|
||||
engine.index((Engine.Index) op);
|
||||
} else if (op instanceof Engine.Delete){
|
||||
engine.delete((Engine.Delete) op);
|
||||
} else {
|
||||
engine.noOp((Engine.NoOp) op);
|
||||
}
|
||||
applyOperation(engine, ops.get(docOffset));
|
||||
if ((docOffset + 1) % 4 == 0) {
|
||||
engine.refresh("test");
|
||||
}
|
||||
|
@ -814,6 +834,36 @@ public abstract class EngineTestCase extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public static void applyOperations(Engine engine, List<Engine.Operation> operations) throws IOException {
|
||||
for (Engine.Operation operation : operations) {
|
||||
applyOperation(engine, operation);
|
||||
if (randomInt(100) < 10) {
|
||||
engine.refresh("test");
|
||||
}
|
||||
if (rarely()) {
|
||||
engine.flush();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static Engine.Result applyOperation(Engine engine, Engine.Operation operation) throws IOException {
|
||||
final Engine.Result result;
|
||||
switch (operation.operationType()) {
|
||||
case INDEX:
|
||||
result = engine.index((Engine.Index) operation);
|
||||
break;
|
||||
case DELETE:
|
||||
result = engine.delete((Engine.Delete) operation);
|
||||
break;
|
||||
case NO_OP:
|
||||
result = engine.noOp((Engine.NoOp) operation);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalStateException("No operation defined for [" + operation + "]");
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets a collection of tuples of docId, sequence number, and primary term of all live documents in the provided engine.
|
||||
*/
|
||||
|
|
|
@@ -160,15 +160,22 @@ public final class CcrLicenseChecker {
        final ClusterStateRequest request,
        final Consumer<Exception> onFailure,
        final Consumer<ClusterState> leaderClusterStateConsumer) {
    checkRemoteClusterLicenseAndFetchClusterState(
    try {
        Client remoteClient = systemClient(client.getRemoteClusterClient(clusterAlias));
        checkRemoteClusterLicenseAndFetchClusterState(
            client,
            clusterAlias,
            systemClient(client.getRemoteClusterClient(clusterAlias)),
            remoteClient,
            request,
            onFailure,
            leaderClusterStateConsumer,
            CcrLicenseChecker::clusterStateNonCompliantRemoteLicense,
            e -> clusterStateUnknownRemoteLicense(clusterAlias, e));
    } catch (Exception e) {
        // client.getRemoteClusterClient(...) can fail with an IllegalArgumentException if the remote
        // connection is unknown
        onFailure.accept(e);
    }
}
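Note: the change wraps the remote-client lookup in a try/catch so that a synchronous failure (for example an unknown remote connection) is routed into the same onFailure consumer used by the asynchronous callbacks, instead of escaping the caller. A reduced sketch of that error-routing shape, using only JDK types and a made-up lookup method:

import java.util.function.Consumer;

public class AsyncErrorRoutingSketch {
    // Illustrative only: route synchronous setup failures into the asynchronous
    // failure path instead of letting them propagate to the caller.
    static void fetchAsync(String clusterAlias, Consumer<String> onResponse, Consumer<Exception> onFailure) {
        try {
            String client = lookupRemoteClient(clusterAlias); // may throw IllegalArgumentException
            onResponse.accept("state-from-" + client);
        } catch (Exception e) {
            onFailure.accept(e); // same failure channel the async callbacks use
        }
    }

    static String lookupRemoteClient(String alias) {
        if (!"known".equals(alias)) {
            throw new IllegalArgumentException("no such remote cluster: " + alias);
        }
        return alias;
    }

    public static void main(String[] args) {
        fetchAsync("known", r -> System.out.println("ok: " + r), e -> System.out.println("failed: " + e));
        fetchAsync("unknown", r -> System.out.println("ok: " + r), e -> System.out.println("failed: " + e));
    }
}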

/**
@@ -403,6 +403,13 @@ public class AutoFollowCoordinator implements ClusterStateApplier {
    return currentState -> {
        AutoFollowMetadata currentAutoFollowMetadata = currentState.metaData().custom(AutoFollowMetadata.TYPE);
        Map<String, List<String>> newFollowedIndexUUIDS = new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs());
        if (newFollowedIndexUUIDS.containsKey(name) == false) {
            // A delete auto-follow pattern request may have removed the auto-follow pattern just before we try to
            // update the auto-follow metadata to record that an index was successfully auto-followed. If that
            // happens, we can simply skip this step.
            return currentState;
        }

        newFollowedIndexUUIDS.compute(name, (key, existingUUIDs) -> {
            assert existingUUIDs != null;
            List<String> newUUIDs = new ArrayList<>(existingUUIDs);
@ -40,8 +40,10 @@ import java.util.function.BiConsumer;
|
|||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
import static org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator.AutoFollower.recordLeaderIndexAsFollowFunction;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
import static org.hamcrest.Matchers.sameInstance;
|
||||
import static org.mockito.Matchers.anyString;
|
||||
|
@ -384,6 +386,33 @@ public class AutoFollowCoordinatorTests extends ESTestCase {
|
|||
assertThat(result.get(1).getName(), equalTo("index2"));
|
||||
}
|
||||
|
||||
public void testRecordLeaderIndexAsFollowFunction() {
|
||||
AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(Collections.emptyMap(),
|
||||
Collections.singletonMap("pattern1", Collections.emptyList()), Collections.emptyMap());
|
||||
ClusterState clusterState = new ClusterState.Builder(new ClusterName("name"))
|
||||
.metaData(new MetaData.Builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata))
|
||||
.build();
|
||||
Function<ClusterState, ClusterState> function = recordLeaderIndexAsFollowFunction("pattern1", new Index("index1", "index1"));
|
||||
|
||||
ClusterState result = function.apply(clusterState);
|
||||
AutoFollowMetadata autoFollowMetadataResult = result.metaData().custom(AutoFollowMetadata.TYPE);
|
||||
assertThat(autoFollowMetadataResult.getFollowedLeaderIndexUUIDs().get("pattern1"), notNullValue());
|
||||
assertThat(autoFollowMetadataResult.getFollowedLeaderIndexUUIDs().get("pattern1").size(), equalTo(1));
|
||||
assertThat(autoFollowMetadataResult.getFollowedLeaderIndexUUIDs().get("pattern1").get(0), equalTo("index1"));
|
||||
}
|
||||
|
||||
public void testRecordLeaderIndexAsFollowFunctionNoEntry() {
|
||||
AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(Collections.emptyMap(), Collections.emptyMap(),
|
||||
Collections.emptyMap());
|
||||
ClusterState clusterState = new ClusterState.Builder(new ClusterName("name"))
|
||||
.metaData(new MetaData.Builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata))
|
||||
.build();
|
||||
Function<ClusterState, ClusterState> function = recordLeaderIndexAsFollowFunction("pattern1", new Index("index1", "index1"));
|
||||
|
||||
ClusterState result = function.apply(clusterState);
|
||||
assertThat(result, sameInstance(clusterState));
|
||||
}
|
||||
|
||||
public void testGetFollowerIndexName() {
|
||||
AutoFollowPattern autoFollowPattern = new AutoFollowPattern("remote", Collections.singletonList("metrics-*"), null, null,
|
||||
null, null, null, null, null, null, null, null, null);
|
||||
|
|
|
@ -280,7 +280,7 @@ public class FollowingEngineTests extends ESTestCase {
|
|||
}
|
||||
|
||||
private FollowingEngine createEngine(Store store, EngineConfig config) throws IOException {
|
||||
store.createEmpty();
|
||||
store.createEmpty(config.getIndexSettings().getIndexVersionCreated().luceneVersion);
|
||||
final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
|
||||
SequenceNumbers.NO_OPS_PERFORMED, shardId, 1L);
|
||||
store.associateIndexWithNewTranslog(translogUuid);
|
||||
|
@ -485,7 +485,7 @@ public class FollowingEngineTests extends ESTestCase {
|
|||
IndexMetaData leaderIndexMetaData = IndexMetaData.builder(index.getName()).settings(leaderSettings).build();
|
||||
IndexSettings leaderIndexSettings = new IndexSettings(leaderIndexMetaData, leaderSettings);
|
||||
try (Store leaderStore = createStore(shardId, leaderIndexSettings, newDirectory())) {
|
||||
leaderStore.createEmpty();
|
||||
leaderStore.createEmpty(leaderIndexMetaData.getCreationVersion().luceneVersion);
|
||||
EngineConfig leaderConfig = engineConfig(shardId, leaderIndexSettings, threadPool, leaderStore, logger, xContentRegistry());
|
||||
leaderStore.associateIndexWithNewTranslog(Translog.createEmptyTranslog(
|
||||
leaderConfig.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, 1L));
|
||||
|
|
|
@ -13,9 +13,11 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.CachedSupplier;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParseException;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.query.AbstractQueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
|
@ -31,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages;
|
|||
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
|
||||
import org.elasticsearch.xpack.core.ml.utils.MlStrings;
|
||||
import org.elasticsearch.xpack.core.ml.utils.ToXContentParams;
|
||||
import org.elasticsearch.xpack.core.ml.utils.XContentObjectTransformer;
|
||||
import org.elasticsearch.xpack.core.ml.utils.time.TimeUtils;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -43,6 +46,7 @@ import java.util.Map;
|
|||
import java.util.Objects;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.BiFunction;
|
||||
|
||||
/**
|
||||
* Datafeed configuration options. Describes where to proactively pull input
|
||||
|
@@ -60,6 +64,45 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
private static final int TWO_MINS_SECONDS = 2 * SECONDS_IN_MINUTE;
private static final int TWENTY_MINS_SECONDS = 20 * SECONDS_IN_MINUTE;
private static final int HALF_DAY_SECONDS = 12 * 60 * SECONDS_IN_MINUTE;
static final XContentObjectTransformer<QueryBuilder> QUERY_TRANSFORMER = XContentObjectTransformer.queryBuilderTransformer();
private static final BiFunction<Map<String, Object>, String, QueryBuilder> lazyQueryParser = (objectMap, id) -> {
    try {
        return QUERY_TRANSFORMER.fromMap(objectMap);
    } catch (IOException | XContentParseException exception) {
        // Some thrown exceptions wrap the real IllegalArgumentException, making it hard to surface the cause to the user
        if (exception.getCause() instanceof IllegalArgumentException) {
            throw ExceptionsHelper.badRequestException(
                Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT,
                    id,
                    exception.getCause().getMessage()),
                exception.getCause());
        } else {
            throw ExceptionsHelper.badRequestException(
                Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, exception, id),
                exception);
        }
    }
};

static final XContentObjectTransformer<AggregatorFactories.Builder> AGG_TRANSFORMER = XContentObjectTransformer.aggregatorTransformer();
private static final BiFunction<Map<String, Object>, String, AggregatorFactories.Builder> lazyAggParser = (objectMap, id) -> {
    try {
        return AGG_TRANSFORMER.fromMap(objectMap);
    } catch (IOException | XContentParseException exception) {
        // Some thrown exceptions wrap the real IllegalArgumentException, making it hard to surface the cause to the user
        if (exception.getCause() instanceof IllegalArgumentException) {
            throw ExceptionsHelper.badRequestException(
                Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT,
                    id,
                    exception.getCause().getMessage()),
                exception.getCause());
        } else {
            throw ExceptionsHelper.badRequestException(
                Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, exception.getMessage(), id),
                exception);
        }
    }
};
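Note: both lambdas follow the same shape: the datafeed keeps its query and aggregations as unparsed maps and only converts them into rich objects when they are first needed, caching the result. A minimal sketch of that lazy, memoized parsing idea using only JDK types; the CachedSupplier below is a simplified stand-in for the Elasticsearch utility of the same name, and the parsing step is faked with toString():

import java.util.Map;
import java.util.function.Supplier;

public class LazyParseSketch {
    // Simplified stand-in for org.elasticsearch.common.util.CachedSupplier:
    // compute once on first get(), then return the cached value.
    static final class CachedSupplier<T> implements Supplier<T> {
        private final Supplier<T> delegate;
        private T value;
        private boolean resolved;

        CachedSupplier(Supplier<T> delegate) {
            this.delegate = delegate;
        }

        @Override
        public synchronized T get() {
            if (resolved == false) {
                value = delegate.get();
                resolved = true;
            }
            return value;
        }
    }

    public static void main(String[] args) {
        Map<String, Object> rawQuery = Map.of("match_all", Map.of());
        // Parsing is deferred until the query is actually needed, and happens at most once.
        Supplier<String> parsedQuery = new CachedSupplier<>(() -> {
            System.out.println("parsing " + rawQuery);
            return rawQuery.toString(); // stand-in for QUERY_TRANSFORMER.fromMap(rawQuery)
        });
        parsedQuery.get();
        parsedQuery.get(); // no second "parsing" line: the result is cached
    }
}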

// Used for QueryPage
public static final ParseField RESULTS_FIELD = new ParseField("datafeeds");
@ -90,6 +133,21 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
public static final ObjectParser<Builder, Void> LENIENT_PARSER = createParser(true);
|
||||
public static final ObjectParser<Builder, Void> STRICT_PARSER = createParser(false);
|
||||
|
||||
public static void validateAggregations(AggregatorFactories.Builder aggregations) {
|
||||
if (aggregations == null) {
|
||||
return;
|
||||
}
|
||||
Collection<AggregationBuilder> aggregatorFactories = aggregations.getAggregatorFactories();
|
||||
if (aggregatorFactories.isEmpty()) {
|
||||
throw ExceptionsHelper.badRequestException(Messages.DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM);
|
||||
}
|
||||
|
||||
AggregationBuilder histogramAggregation = ExtractorUtils.getHistogramAggregation(aggregatorFactories);
|
||||
Builder.checkNoMoreHistogramAggregations(histogramAggregation.getSubAggregations());
|
||||
Builder.checkHistogramAggregationHasChildMaxTimeAgg(histogramAggregation);
|
||||
Builder.checkHistogramIntervalIsPositive(histogramAggregation);
|
||||
}
|
||||
|
||||
private static ObjectParser<Builder, Void> createParser(boolean ignoreUnknownFields) {
|
||||
ObjectParser<Builder, Void> parser = new ObjectParser<>("datafeed_config", ignoreUnknownFields, Builder::new);
|
||||
|
||||
|
@ -102,9 +160,15 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY);
|
||||
parser.declareString((builder, val) ->
|
||||
builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY);
|
||||
parser.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
|
||||
parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS);
|
||||
parser.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
|
||||
if (ignoreUnknownFields) {
|
||||
parser.declareObject(Builder::setQuery, (p, c) -> p.map(), QUERY);
|
||||
parser.declareObject(Builder::setAggregations, (p, c) -> p.map(), AGGREGATIONS);
|
||||
parser.declareObject(Builder::setAggregations, (p, c) -> p.map(), AGGS);
|
||||
} else {
|
||||
parser.declareObject(Builder::setParsedQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
|
||||
parser.declareObject(Builder::setParsedAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS);
|
||||
parser.declareObject(Builder::setParsedAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
|
||||
}
|
||||
parser.declareObject(Builder::setScriptFields, (p, c) -> {
|
||||
List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
|
||||
while (p.nextToken() != XContentParser.Token.END_OBJECT) {
|
||||
|
@ -146,16 +210,18 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
|
||||
private final List<String> indices;
|
||||
private final List<String> types;
|
||||
private final QueryBuilder query;
|
||||
private final AggregatorFactories.Builder aggregations;
|
||||
private final Map<String, Object> query;
|
||||
private final Map<String, Object> aggregations;
|
||||
private final List<SearchSourceBuilder.ScriptField> scriptFields;
|
||||
private final Integer scrollSize;
|
||||
private final ChunkingConfig chunkingConfig;
|
||||
private final Map<String, String> headers;
|
||||
private final DelayedDataCheckConfig delayedDataCheckConfig;
|
||||
private final CachedSupplier<QueryBuilder> querySupplier;
|
||||
private final CachedSupplier<AggregatorFactories.Builder> aggSupplier;
|
||||
|
||||
private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, List<String> types,
|
||||
QueryBuilder query, AggregatorFactories.Builder aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
|
||||
Map<String, Object> query, Map<String, Object> aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
|
||||
Integer scrollSize, ChunkingConfig chunkingConfig, Map<String, String> headers,
|
||||
DelayedDataCheckConfig delayedDataCheckConfig) {
|
||||
this.id = id;
|
||||
|
@ -171,6 +237,8 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
this.chunkingConfig = chunkingConfig;
|
||||
this.headers = Collections.unmodifiableMap(headers);
|
||||
this.delayedDataCheckConfig = delayedDataCheckConfig;
|
||||
this.querySupplier = new CachedSupplier<>(() -> lazyQueryParser.apply(query, id));
|
||||
this.aggSupplier = new CachedSupplier<>(() -> lazyAggParser.apply(aggregations, id));
|
||||
}
|
||||
|
||||
public DatafeedConfig(StreamInput in) throws IOException {
|
||||
|
@ -188,8 +256,17 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
} else {
|
||||
this.types = null;
|
||||
}
|
||||
this.query = in.readNamedWriteable(QueryBuilder.class);
|
||||
this.aggregations = in.readOptionalWriteable(AggregatorFactories.Builder::new);
|
||||
if (in.getVersion().before(Version.V_6_6_0)) {
|
||||
this.query = QUERY_TRANSFORMER.toMap(in.readNamedWriteable(QueryBuilder.class));
|
||||
this.aggregations = AGG_TRANSFORMER.toMap(in.readOptionalWriteable(AggregatorFactories.Builder::new));
|
||||
} else {
|
||||
this.query = in.readMap();
|
||||
if (in.readBoolean()) {
|
||||
this.aggregations = in.readMap();
|
||||
} else {
|
||||
this.aggregations = null;
|
||||
}
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
this.scriptFields = Collections.unmodifiableList(in.readList(SearchSourceBuilder.ScriptField::new));
|
||||
} else {
|
||||
|
@ -207,6 +284,8 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
} else {
|
||||
delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig();
|
||||
}
|
||||
this.querySupplier = new CachedSupplier<>(() -> lazyQueryParser.apply(query, id));
|
||||
this.aggSupplier = new CachedSupplier<>(() -> lazyAggParser.apply(aggregations, id));
|
||||
}
|
||||
|
||||
public String getId() {
|
||||
|
@ -237,11 +316,19 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
return scrollSize;
|
||||
}
|
||||
|
||||
public QueryBuilder getQuery() {
|
||||
public QueryBuilder getParsedQuery() {
|
||||
return querySupplier.get();
|
||||
}
|
||||
|
||||
public Map<String, Object> getQuery() {
|
||||
return query;
|
||||
}
|
||||
|
||||
public AggregatorFactories.Builder getAggregations() {
|
||||
public AggregatorFactories.Builder getParsedAggregations() {
|
||||
return aggSupplier.get();
|
||||
}
|
||||
|
||||
public Map<String, Object> getAggregations() {
|
||||
return aggregations;
|
||||
}
|
||||
|
||||
|
@ -249,14 +336,14 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
* Returns the histogram's interval as epoch millis.
|
||||
*/
|
||||
public long getHistogramIntervalMillis() {
|
||||
return ExtractorUtils.getHistogramIntervalMillis(aggregations);
|
||||
return ExtractorUtils.getHistogramIntervalMillis(getParsedAggregations());
|
||||
}
|
||||
|
||||
/**
|
||||
* @return {@code true} when there are non-empty aggregations, {@code false} otherwise
|
||||
*/
|
||||
public boolean hasAggregations() {
|
||||
return aggregations != null && aggregations.count() > 0;
|
||||
return aggregations != null && aggregations.size() > 0;
|
||||
}
|
||||
|
||||
public List<SearchSourceBuilder.ScriptField> getScriptFields() {
|
||||
|
@ -293,8 +380,16 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
out.writeNamedWriteable(query);
|
||||
out.writeOptionalWriteable(aggregations);
|
||||
if (out.getVersion().before(Version.V_6_6_0)) {
|
||||
out.writeNamedWriteable(getParsedQuery());
|
||||
out.writeOptionalWriteable(getParsedAggregations());
|
||||
} else {
|
||||
out.writeMap(query);
|
||||
out.writeBoolean(aggregations != null);
|
||||
if (aggregations != null) {
|
||||
out.writeMap(aggregations);
|
||||
}
|
||||
}
|
||||
if (scriptFields != null) {
|
||||
out.writeBoolean(true);
|
||||
out.writeList(scriptFields);
|
||||
|
@ -454,15 +549,20 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
private TimeValue frequency;
|
||||
private List<String> indices = Collections.emptyList();
|
||||
private List<String> types = Collections.emptyList();
|
||||
private QueryBuilder query = QueryBuilders.matchAllQuery();
|
||||
private AggregatorFactories.Builder aggregations;
|
||||
private Map<String, Object> query;
|
||||
private Map<String, Object> aggregations;
|
||||
private List<SearchSourceBuilder.ScriptField> scriptFields;
|
||||
private Integer scrollSize = DEFAULT_SCROLL_SIZE;
|
||||
private ChunkingConfig chunkingConfig;
|
||||
private Map<String, String> headers = Collections.emptyMap();
|
||||
private DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig();
|
||||
|
||||
|
||||
|
||||
public Builder() {
|
||||
try {
|
||||
this.query = QUERY_TRANSFORMER.toMap(QueryBuilders.matchAllQuery());
|
||||
} catch (IOException ex) { /*Should never happen*/ }
|
||||
}
|
||||
|
||||
public Builder(String id, String jobId) {
|
||||
|
@ -517,11 +617,47 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
this.frequency = frequency;
|
||||
}
|
||||
|
||||
public void setQuery(QueryBuilder query) {
|
||||
public void setParsedQuery(QueryBuilder query) {
|
||||
try {
|
||||
setQuery(QUERY_TRANSFORMER.toMap(ExceptionsHelper.requireNonNull(query, QUERY.getPreferredName())));
|
||||
} catch (IOException | XContentParseException exception) {
|
||||
if (exception.getCause() instanceof IllegalArgumentException) {
|
||||
// Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user
|
||||
throw ExceptionsHelper.badRequestException(
|
||||
Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT,
|
||||
id,
|
||||
exception.getCause().getMessage()),
|
||||
exception.getCause());
|
||||
} else {
|
||||
throw ExceptionsHelper.badRequestException(
|
||||
Messages.getMessage(Messages.DATAFEED_CONFIG_QUERY_BAD_FORMAT, id, exception.getMessage()), exception);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void setQuery(Map<String, Object> query) {
|
||||
this.query = ExceptionsHelper.requireNonNull(query, QUERY.getPreferredName());
|
||||
}
|
||||
|
||||
public void setAggregations(AggregatorFactories.Builder aggregations) {
|
||||
public void setParsedAggregations(AggregatorFactories.Builder aggregations) {
|
||||
try {
|
||||
setAggregations(AGG_TRANSFORMER.toMap(aggregations));
|
||||
} catch (IOException | XContentParseException exception) {
|
||||
// Certain thrown exceptions wrap up the real Illegal argument making it hard to determine cause for the user
|
||||
if (exception.getCause() instanceof IllegalArgumentException) {
|
||||
throw ExceptionsHelper.badRequestException(
|
||||
Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT,
|
||||
id,
|
||||
exception.getCause().getMessage()),
|
||||
exception.getCause());
|
||||
} else {
|
||||
throw ExceptionsHelper.badRequestException(
|
||||
Messages.getMessage(Messages.DATAFEED_CONFIG_AGG_BAD_FORMAT, id, exception.getMessage()), exception);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void setAggregations(Map<String, Object> aggregations) {
|
||||
this.aggregations = aggregations;
|
||||
}
|
||||
|
||||
|
@ -564,30 +700,22 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
throw invalidOptionValue(TYPES.getPreferredName(), types);
|
||||
}
|
||||
|
||||
validateAggregations();
|
||||
validateScriptFields();
|
||||
setDefaultChunkingConfig();
|
||||
|
||||
setDefaultQueryDelay();
|
||||
return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize,
|
||||
chunkingConfig, headers, delayedDataCheckConfig);
|
||||
}
|
||||
|
||||
void validateAggregations() {
|
||||
void validateScriptFields() {
|
||||
if (aggregations == null) {
|
||||
return;
|
||||
}
|
||||
if (scriptFields != null && !scriptFields.isEmpty()) {
|
||||
throw ExceptionsHelper.badRequestException(
|
||||
Messages.getMessage(Messages.DATAFEED_CONFIG_CANNOT_USE_SCRIPT_FIELDS_WITH_AGGS));
|
||||
Messages.getMessage(Messages.DATAFEED_CONFIG_CANNOT_USE_SCRIPT_FIELDS_WITH_AGGS));
|
||||
}
|
||||
Collection<AggregationBuilder> aggregatorFactories = aggregations.getAggregatorFactories();
|
||||
if (aggregatorFactories.isEmpty()) {
|
||||
throw ExceptionsHelper.badRequestException(Messages.DATAFEED_AGGREGATIONS_REQUIRES_DATE_HISTOGRAM);
|
||||
}
|
||||
|
||||
AggregationBuilder histogramAggregation = ExtractorUtils.getHistogramAggregation(aggregatorFactories);
|
||||
checkNoMoreHistogramAggregations(histogramAggregation.getSubAggregations());
|
||||
checkHistogramAggregationHasChildMaxTimeAgg(histogramAggregation);
|
||||
checkHistogramIntervalIsPositive(histogramAggregation);
|
||||
}
|
||||
|
||||
private static void checkNoMoreHistogramAggregations(Collection<AggregationBuilder> aggregations) {
|
||||
|
@ -630,7 +758,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
|
|||
if (aggregations == null) {
|
||||
chunkingConfig = ChunkingConfig.newAuto();
|
||||
} else {
|
||||
long histogramIntervalMillis = ExtractorUtils.getHistogramIntervalMillis(aggregations);
|
||||
long histogramIntervalMillis = ExtractorUtils.getHistogramIntervalMillis(lazyAggParser.apply(aggregations, id));
|
||||
chunkingConfig = ChunkingConfig.newManual(TimeValue.timeValueMillis(
|
||||
DEFAULT_AGGREGATION_CHUNKING_BUCKETS * histogramIntervalMillis));
|
||||
}
|
||||
|
|
|
@ -295,10 +295,11 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
|
|||
builder.setTypes(types);
|
||||
}
|
||||
if (query != null) {
|
||||
builder.setQuery(query);
|
||||
builder.setParsedQuery(query);
|
||||
}
|
||||
if (aggregations != null) {
|
||||
builder.setAggregations(aggregations);
|
||||
DatafeedConfig.validateAggregations(aggregations);
|
||||
builder.setParsedAggregations(aggregations);
|
||||
}
|
||||
if (scriptFields != null) {
|
||||
builder.setScriptFields(scriptFields);
|
||||
|
@ -371,9 +372,9 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
|
|||
&& (queryDelay == null || Objects.equals(queryDelay, datafeed.getQueryDelay()))
|
||||
&& (indices == null || Objects.equals(indices, datafeed.getIndices()))
|
||||
&& (types == null || Objects.equals(types, datafeed.getTypes()))
|
||||
&& (query == null || Objects.equals(query, datafeed.getQuery()))
|
||||
&& (query == null || Objects.equals(query, datafeed.getParsedQuery()))
|
||||
&& (scrollSize == null || Objects.equals(scrollSize, datafeed.getQueryDelay()))
|
||||
&& (aggregations == null || Objects.equals(aggregations, datafeed.getAggregations()))
|
||||
&& (aggregations == null || Objects.equals(aggregations, datafeed.getParsedAggregations()))
|
||||
&& (scriptFields == null || Objects.equals(scriptFields, datafeed.getScriptFields()))
|
||||
&& (delayedDataCheckConfig == null || Objects.equals(delayedDataCheckConfig, datafeed.getDelayedDataCheckConfig()))
|
||||
&& (chunkingConfig == null || Objects.equals(chunkingConfig, datafeed.getChunkingConfig()));
|
||||
|
|
|
@ -26,6 +26,8 @@ public final class Messages {
|
|||
"delayed_data_check_config: check_window [{0}] must be greater than the bucket_span [{1}]";
|
||||
public static final String DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS =
|
||||
"delayed_data_check_config: check_window [{0}] must be less than 10,000x the bucket_span [{1}]";
|
||||
public static final String DATAFEED_CONFIG_QUERY_BAD_FORMAT = "Datafeed [{0}] query is not parsable: {1}";
|
||||
public static final String DATAFEED_CONFIG_AGG_BAD_FORMAT = "Datafeed [{0}] aggregations are not parsable: {1}";
|
||||
|
||||
public static final String DATAFEED_DOES_NOT_SUPPORT_JOB_WITH_LATENCY = "A job configured with datafeed cannot support latency";
|
||||
public static final String DATAFEED_NOT_FOUND = "No datafeed with id [{0}] exists";
|
||||
|
|
|
@ -61,6 +61,9 @@ public class XContentObjectTransformer<T extends ToXContentObject> {
|
|||
}
|
||||
|
||||
public T fromMap(Map<String, Object> stringObjectMap) throws IOException {
|
||||
if (stringObjectMap == null) {
|
||||
return null;
|
||||
}
|
||||
LoggingDeprecationAccumulationHandler deprecationLogger = new LoggingDeprecationAccumulationHandler();
|
||||
try(XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(stringObjectMap);
|
||||
XContentParser parser = XContentType.JSON
|
||||
|
@ -74,6 +77,9 @@ public class XContentObjectTransformer<T extends ToXContentObject> {
|
|||
}
|
||||
|
||||
public Map<String, Object> toMap(T object) throws IOException {
|
||||
if (object == null) {
|
||||
return null;
|
||||
}
|
||||
try(XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
|
||||
XContentBuilder content = object.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS);
|
||||
return XContentHelper.convertToMap(BytesReference.bytes(content), true, XContentType.JSON).v2();
|
||||
|
|
|
@ -67,7 +67,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
|
|||
builder.setIndices(randomStringList(1, 10));
|
||||
builder.setTypes(randomStringList(0, 10));
|
||||
if (randomBoolean()) {
|
||||
builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
|
||||
builder.setParsedQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
|
||||
}
|
||||
boolean addScriptFields = randomBoolean();
|
||||
if (addScriptFields) {
|
||||
|
@ -91,7 +91,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
|
|||
MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time");
|
||||
aggs.addAggregator(AggregationBuilders.dateHistogram("buckets")
|
||||
.interval(aggHistogramInterval).subAggregation(maxTime).field("time"));
|
||||
builder.setAggregations(aggs);
|
||||
builder.setParsedAggregations(aggs);
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE));
|
||||
|
@ -155,6 +155,43 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
|
|||
" \"scroll_size\": 1234\n" +
|
||||
"}";
|
||||
|
||||
private static final String ANACHRONISTIC_QUERY_DATAFEED = "{\n" +
|
||||
" \"datafeed_id\": \"farequote-datafeed\",\n" +
|
||||
" \"job_id\": \"farequote\",\n" +
|
||||
" \"frequency\": \"1h\",\n" +
|
||||
" \"indices\": [\"farequote1\", \"farequote2\"],\n" +
|
||||
//query:match:type stopped being supported in 6.x
|
||||
" \"query\": {\"match\" : {\"query\":\"fieldName\", \"type\": \"phrase\"}},\n" +
|
||||
" \"scroll_size\": 1234\n" +
|
||||
"}";
|
||||
|
||||
private static final String ANACHRONISTIC_AGG_DATAFEED = "{\n" +
|
||||
" \"datafeed_id\": \"farequote-datafeed\",\n" +
|
||||
" \"job_id\": \"farequote\",\n" +
|
||||
" \"frequency\": \"1h\",\n" +
|
||||
" \"indices\": [\"farequote1\", \"farequote2\"],\n" +
|
||||
" \"aggregations\": {\n" +
|
||||
" \"buckets\": {\n" +
|
||||
" \"date_histogram\": {\n" +
|
||||
" \"field\": \"time\",\n" +
|
||||
" \"interval\": \"360s\",\n" +
|
||||
" \"time_zone\": \"UTC\"\n" +
|
||||
" },\n" +
|
||||
" \"aggregations\": {\n" +
|
||||
" \"time\": {\n" +
|
||||
" \"max\": {\"field\": \"time\"}\n" +
|
||||
" },\n" +
|
||||
" \"airline\": {\n" +
|
||||
" \"terms\": {\n" +
|
||||
" \"field\": \"airline\",\n" +
|
||||
" \"size\": 0\n" + //size: 0 stopped being supported in 6.x
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
"}";
|
||||
|
||||
    public void testFutureConfigParse() throws IOException {
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
                .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED);

@@ -163,6 +200,44 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
        assertEquals("[6:5] [datafeed_config] unknown field [tomorrows_technology_today], parser not found", e.getMessage());
    }

    public void testPastQueryConfigParse() throws IOException {
        try(XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_QUERY_DATAFEED)) {

            DatafeedConfig config = DatafeedConfig.LENIENT_PARSER.apply(parser, null).build();
            ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> config.getParsedQuery());
            assertEquals("[match] query doesn't support multiple fields, found [query] and [type]", e.getMessage());
        }

        try(XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_QUERY_DATAFEED)) {

            XContentParseException e = expectThrows(XContentParseException.class,
                () -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build());
            assertEquals("[6:25] [datafeed_config] failed to parse field [query]", e.getMessage());
        }
    }

    public void testPastAggConfigParse() throws IOException {
        try(XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_AGG_DATAFEED)) {

            DatafeedConfig.Builder configBuilder = DatafeedConfig.LENIENT_PARSER.apply(parser, null);
            ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> configBuilder.build());
            assertEquals(
                "Datafeed [farequote-datafeed] aggregations are not parsable: [size] must be greater than 0. Found [0] in [airline]",
                e.getMessage());
        }

        try(XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, ANACHRONISTIC_AGG_DATAFEED)) {

            XContentParseException e = expectThrows(XContentParseException.class,
                () -> DatafeedConfig.STRICT_PARSER.apply(parser, null).build());
            assertEquals("[8:25] [datafeed_config] failed to parse field [aggregations]", e.getMessage());
        }
    }

    public void testFutureMetadataParse() throws IOException {
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
                .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, FUTURE_DATAFEED);
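Editor's note, not part of the commit: the hunk above adds tests that pin down two different failure modes. DatafeedConfig.LENIENT_PARSER accepts stored JSON that current code can no longer interpret and defers the error until the parsed query or aggregations are requested, while DatafeedConfig.STRICT_PARSER rejects the same JSON at parse time. A minimal sketch of that contract, reusing only identifiers visible in the diff; package paths and the class name are assumptions.

// ---- editor's illustrative sketch, not part of this commit ----
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;

import java.io.IOException;

class LenientVersusStrictSketch {

    // Lenient parsing is aimed at configs already stored in the cluster: stale JSON still parses,
    // and the problem only surfaces when the parsed query is materialised.
    static void lenient(String staleJson) throws IOException {
        try (XContentParser parser = XContentFactory.xContent(XContentType.JSON)
                .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, staleJson)) {
            DatafeedConfig config = DatafeedConfig.LENIENT_PARSER.apply(parser, null).build();
            try {
                config.getParsedQuery(); // deferred failure happens here, not at parse time
            } catch (ElasticsearchException e) {
                // the unusable stored query is reported only on access
            }
        }
    }

    // Strict parsing is aimed at new configs: the same JSON is rejected immediately.
    static void strict(String staleJson) throws IOException {
        try (XContentParser parser = XContentFactory.xContent(XContentType.JSON)
                .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, staleJson)) {
            try {
                DatafeedConfig.STRICT_PARSER.apply(parser, null).build();
            } catch (XContentParseException e) {
                // rejected up front, before a config object is ever built
            }
        }
    }
}
// ---- end of sketch ----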
@@ -274,7 +349,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
        datafeed.setTypes(Collections.singletonList("my_type"));
        datafeed.setScriptFields(Collections.singletonList(new SearchSourceBuilder.ScriptField(randomAlphaOfLength(10),
                mockScript(randomAlphaOfLength(10)), randomBoolean())));
        datafeed.setAggregations(new AggregatorFactories.Builder().addAggregator(AggregationBuilders.avg("foo")));
        datafeed.setParsedAggregations(new AggregatorFactories.Builder().addAggregator(AggregationBuilders.avg("foo")));

        ElasticsearchException e = expectThrows(ElasticsearchException.class, datafeed::build);

@@ -295,7 +370,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
        builder.setIndices(Collections.singletonList("myIndex"));
        builder.setTypes(Collections.singletonList("myType"));
        MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time");
        builder.setAggregations(new AggregatorFactories.Builder().addAggregator(
        builder.setParsedAggregations(new AggregatorFactories.Builder().addAggregator(
                AggregationBuilders.dateHistogram("time").interval(300000).subAggregation(maxTime).field("time")));
        DatafeedConfig datafeedConfig = builder.build();

@@ -306,7 +381,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
        DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed1", "job1");
        builder.setIndices(Collections.singletonList("myIndex"));
        builder.setTypes(Collections.singletonList("myType"));
        builder.setAggregations(new AggregatorFactories.Builder());
        builder.setParsedAggregations(new AggregatorFactories.Builder());

        ElasticsearchException e = expectThrows(ElasticsearchException.class, builder::build);

@@ -318,13 +393,13 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
        builder.setIndices(Collections.singletonList("myIndex"));
        builder.setTypes(Collections.singletonList("myType"));
        MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time");
        builder.setAggregations(new AggregatorFactories.Builder().addAggregator(
        builder.setParsedAggregations(new AggregatorFactories.Builder().addAggregator(
                AggregationBuilders.histogram("time").subAggregation(maxTime).field("time"))
        );

        ElasticsearchException e = expectThrows(ElasticsearchException.class, builder::build);

        assertThat(e.getMessage(), equalTo("Aggregation interval must be greater than 0"));
        assertThat(e.getMessage(), containsString("[interval] must be >0 for histogram aggregation [time]"));
    }

    public void testBuild_GivenDateHistogramWithInvalidTimeZone() {

@@ -341,7 +416,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
        ElasticsearchException e = expectThrows(ElasticsearchException.class,
                () -> createDatafeedWithDateHistogram((String) null));

        assertThat(e.getMessage(), equalTo("Aggregation interval must be greater than 0"));
        assertThat(e.getMessage(), containsString("Aggregation interval must be greater than 0"));
    }

    public void testBuild_GivenValidDateHistogram() {

@@ -402,9 +477,8 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
        TermsAggregationBuilder toplevelTerms = AggregationBuilders.terms("top_level");
        toplevelTerms.subAggregation(dateHistogram);

        DatafeedConfig.Builder builder = new DatafeedConfig.Builder("foo", "bar");
        builder.setAggregations(new AggregatorFactories.Builder().addAggregator(toplevelTerms));
        ElasticsearchException e = expectThrows(ElasticsearchException.class, builder::validateAggregations);
        ElasticsearchException e = expectThrows(ElasticsearchException.class,
                () -> DatafeedConfig.validateAggregations(new AggregatorFactories.Builder().addAggregator(toplevelTerms)));

        assertEquals("Aggregations can only have 1 date_histogram or histogram aggregation", e.getMessage());
    }
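Editor's note, not part of the commit: the last hunk above replaces the instance-level builder::validateAggregations with a static DatafeedConfig.validateAggregations(AggregatorFactories.Builder), so the aggregation tree can be checked before any DatafeedConfig.Builder exists. A sketch of the new call shape; package paths, the class name and the aggregation names are assumptions, and the error texts quoted in the comment come from the assertions in this diff.

// ---- editor's illustrative sketch, not part of this commit ----
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;

class ValidateAggregationsSketch {
    static void check() {
        AggregatorFactories.Builder aggs = new AggregatorFactories.Builder()
                .addAggregator(AggregationBuilders.dateHistogram("buckets")
                        .field("time")
                        .interval(300000) // millis, must be > 0
                        .subAggregation(AggregationBuilders.max("time").field("time")));
        try {
            // No DatafeedConfig.Builder is needed any more; the check is a static utility.
            DatafeedConfig.validateAggregations(aggs);
        } catch (ElasticsearchException e) {
            // e.g. "Aggregations can only have 1 date_histogram or histogram aggregation",
            // or "[interval] must be >0 for histogram aggregation [...]"
        }
    }
}
// ---- end of sketch ----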
@@ -520,7 +594,9 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
        DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed1", "job1");
        builder.setIndices(Collections.singletonList("myIndex"));
        builder.setTypes(Collections.singletonList("myType"));
        builder.setAggregations(new AggregatorFactories.Builder().addAggregator(dateHistogram));
        AggregatorFactories.Builder aggs = new AggregatorFactories.Builder().addAggregator(dateHistogram);
        DatafeedConfig.validateAggregations(aggs);
        builder.setParsedAggregations(aggs);
        return builder.build();
    }

@@ -556,11 +632,11 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
                break;
            case 6:
                BoolQueryBuilder query = new BoolQueryBuilder();
                if (instance.getQuery() != null) {
                    query.must(instance.getQuery());
                if (instance.getParsedQuery() != null) {
                    query.must(instance.getParsedQuery());
                }
                query.filter(new TermQueryBuilder(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)));
                builder.setQuery(query);
                builder.setParsedQuery(query);
                break;
            case 7:
                if (instance.hasAggregations()) {

@@ -571,7 +647,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
                    aggBuilder
                            .addAggregator(new DateHistogramAggregationBuilder(timeField).field(timeField).interval(between(10000, 3600000))
                                    .subAggregation(new MaxAggregationBuilder(timeField).field(timeField)));
                    builder.setAggregations(aggBuilder);
                    builder.setParsedAggregations(aggBuilder);
                    if (instance.getScriptFields().isEmpty() == false) {
                        builder.setScriptFields(Collections.emptyList());
                    }
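Editor's note, not part of the commit: across these hunks the tests move from setQuery/setAggregations and getQuery/getAggregations to the *Parsed* variants, which suggests DatafeedConfig now keeps the raw query and aggregations alongside a form that is parsed on demand. A sketch of the renamed builder surface as the tests exercise it; the ids, index and field names are invented and package paths are assumptions.

// ---- editor's illustrative sketch, not part of this commit ----
import java.util.Collections;

import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;

class ParsedSettersSketch {
    static DatafeedConfig build() {
        DatafeedConfig.Builder builder = new DatafeedConfig.Builder("my-datafeed", "my-job");
        builder.setIndices(Collections.singletonList("my-index"));
        // Already-parsed objects now go through the *Parsed* setters ...
        builder.setParsedQuery(QueryBuilders.termQuery("airline", "AAL"));
        builder.setParsedAggregations(new AggregatorFactories.Builder().addAggregator(
                AggregationBuilders.dateHistogram("buckets")
                        .field("time")
                        .interval(300000)
                        .subAggregation(AggregationBuilders.max("time").field("time"))));
        DatafeedConfig config = builder.build();
        // ... and are read back through the matching *Parsed* getters.
        assert config.getParsedQuery() != null;
        return config;
    }
}
// ---- end of sketch ----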
@@ -167,7 +167,7 @@ public class DatafeedUpdateTests extends AbstractSerializingTestCase<DatafeedUpd
        assertThat(updatedDatafeed.getTypes(), equalTo(Collections.singletonList("t_2")));
        assertThat(updatedDatafeed.getQueryDelay(), equalTo(TimeValue.timeValueSeconds(42)));
        assertThat(updatedDatafeed.getFrequency(), equalTo(TimeValue.timeValueSeconds(142)));
        assertThat(updatedDatafeed.getQuery(), equalTo(QueryBuilders.termQuery("a", "b")));
        assertThat(updatedDatafeed.getParsedQuery(), equalTo(QueryBuilders.termQuery("a", "b")));
        assertThat(updatedDatafeed.hasAggregations(), is(false));
        assertThat(updatedDatafeed.getScriptFields(),
                equalTo(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false))));

@@ -192,7 +192,7 @@ public class DatafeedUpdateTests extends AbstractSerializingTestCase<DatafeedUpd

        assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_1")));
        assertThat(updatedDatafeed.getTypes(), equalTo(Collections.singletonList("t_1")));
        assertThat(updatedDatafeed.getAggregations(),
        assertThat(updatedDatafeed.getParsedAggregations(),
                equalTo(new AggregatorFactories.Builder().addAggregator(
                        AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime))));
    }

@@ -153,13 +153,13 @@ public class DelayedDataDetectorIT extends MlNativeAutodetectIntegTestCase {
        DatafeedConfig.Builder datafeedConfigBuilder = createDatafeedBuilder(job.getId() + "-datafeed",
                job.getId(),
                Collections.singletonList(index));
        datafeedConfigBuilder.setAggregations(new AggregatorFactories.Builder().addAggregator(
        datafeedConfigBuilder.setParsedAggregations(new AggregatorFactories.Builder().addAggregator(
                AggregationBuilders.histogram("time")
                        .subAggregation(maxTime)
                        .subAggregation(avgAggregationBuilder)
                        .field("time")
                        .interval(TimeValue.timeValueMinutes(5).millis())));
        datafeedConfigBuilder.setQuery(new RangeQueryBuilder("value").gte(numDocs/2));
        datafeedConfigBuilder.setParsedQuery(new RangeQueryBuilder("value").gte(numDocs/2));
        datafeedConfigBuilder.setFrequency(TimeValue.timeValueMinutes(5));
        datafeedConfigBuilder.setDelayedDataCheckConfig(DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(12)));
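Editor's note, not part of the commit: the integration test above also enables the delayed data check with a twelve hour window, using the factory method visible in the hunk. Shown in isolation below; the package paths and the wrapper class are assumptions.

// ---- editor's illustrative sketch, not part of this commit ----
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig;

class DelayedDataCheckSketch {
    static void enable(DatafeedConfig.Builder datafeedConfigBuilder) {
        // Ask the datafeed to re-check a 12 hour window for documents that arrived late.
        datafeedConfigBuilder.setDelayedDataCheckConfig(
                DelayedDataCheckConfig.enabledDelayedDataCheckConfig(TimeValue.timeValueHours(12)));
    }
}
// ---- end of sketch ----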