Merge remote-tracking branch 'upstream/7.x' into enrich-7.x
This commit is contained in:
commit
860e783f14
40
build.gradle
40
build.gradle
|
@ -584,9 +584,30 @@ allprojects {
|
|||
configurations.findAll { it.isCanBeResolved() }.each { it.resolve() }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
allprojects {
|
||||
// helper task to print direct dependencies of a single task
|
||||
project.tasks.addRule("Pattern: <taskName>Dependencies") { String taskName ->
|
||||
if (taskName.endsWith("Dependencies") == false) {
|
||||
return
|
||||
}
|
||||
if (project.tasks.findByName(taskName) != null) {
|
||||
return
|
||||
}
|
||||
String realTaskName = taskName.substring(0, taskName.length() - "Dependencies".length())
|
||||
Task realTask = project.tasks.findByName(realTaskName)
|
||||
if (realTask == null) {
|
||||
return
|
||||
}
|
||||
project.tasks.create(taskName) {
|
||||
doLast {
|
||||
println("${realTask.path} dependencies:")
|
||||
for (Task dep : realTask.getTaskDependencies().getDependencies(realTask)) {
|
||||
println(" - ${dep.path}")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
task checkPart1
|
||||
task checkPart2
|
||||
tasks.matching { it.name == "check" }.all { check ->
|
||||
|
@ -598,6 +619,21 @@ allprojects {
|
|||
}
|
||||
}
|
||||
|
||||
subprojects {
|
||||
// Common config when running with a FIPS-140 runtime JVM
|
||||
if (project.ext.has("inFipsJvm") && project.ext.inFipsJvm) {
|
||||
tasks.withType(Test) {
|
||||
systemProperty 'javax.net.ssl.trustStorePassword', 'password'
|
||||
systemProperty 'javax.net.ssl.keyStorePassword', 'password'
|
||||
}
|
||||
project.pluginManager.withPlugin("elasticsearch.testclusters") {
|
||||
project.testClusters.all {
|
||||
systemProperty 'javax.net.ssl.trustStorePassword', 'password'
|
||||
systemProperty 'javax.net.ssl.keyStorePassword', 'password'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -113,8 +113,6 @@ repositories {
|
|||
|
||||
dependencies {
|
||||
compile localGroovy()
|
||||
compile "com.carrotsearch.randomizedtesting:junit4-ant:${props.getProperty('randomizedrunner')}"
|
||||
compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
|
||||
|
||||
compile 'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3'
|
||||
compile 'com.netflix.nebula:nebula-publishing-plugin:4.4.4'
|
||||
|
@ -127,16 +125,7 @@ dependencies {
|
|||
compile 'de.thetaphi:forbiddenapis:2.6'
|
||||
compile 'com.avast.gradle:gradle-docker-compose-plugin:0.8.12'
|
||||
testCompile "junit:junit:${props.getProperty('junit')}"
|
||||
}
|
||||
|
||||
|
||||
// Gradle 2.14+ removed ProgressLogger(-Factory) classes from the public APIs
|
||||
// Use logging dependency instead
|
||||
// Gradle 4.3.1 stopped releasing the logging jars to jcenter, just use the last available one
|
||||
GradleVersion logVersion = GradleVersion.current() > GradleVersion.version('4.3') ? GradleVersion.version('4.3') : GradleVersion.current()
|
||||
|
||||
dependencies {
|
||||
compileOnly "org.gradle:gradle-logging:${logVersion.getVersion()}"
|
||||
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
|
||||
}
|
||||
|
||||
/*****************************************************************************
|
||||
|
|
|
@ -61,6 +61,9 @@ import org.gradle.util.GradleVersion
|
|||
import java.nio.charset.StandardCharsets
|
||||
import java.time.ZoneOffset
|
||||
import java.time.ZonedDateTime
|
||||
import java.util.concurrent.ExecutorService
|
||||
import java.util.concurrent.Executors
|
||||
import java.util.concurrent.Future
|
||||
import java.util.regex.Matcher
|
||||
|
||||
/**
|
||||
|
@ -127,13 +130,6 @@ class BuildPlugin implements Plugin<Project> {
|
|||
String runtimeJavaHome = findRuntimeJavaHome(compilerJavaHome)
|
||||
File gradleJavaHome = Jvm.current().javaHome
|
||||
|
||||
final Map<Integer, String> javaVersions = [:]
|
||||
for (int version = 8; version <= Integer.parseInt(minimumCompilerVersion.majorVersion); version++) {
|
||||
if(System.getenv(getJavaHomeEnvVarName(version.toString())) != null) {
|
||||
javaVersions.put(version, findJavaHome(version.toString()));
|
||||
}
|
||||
}
|
||||
|
||||
String javaVendor = System.getProperty('java.vendor')
|
||||
String gradleJavaVersion = System.getProperty('java.version')
|
||||
String gradleJavaVersionDetails = "${javaVendor} ${gradleJavaVersion}" +
|
||||
|
@ -153,8 +149,12 @@ class BuildPlugin implements Plugin<Project> {
|
|||
runtimeJavaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, runtimeJavaHome))
|
||||
}
|
||||
|
||||
String inFipsJvmScript = 'print(java.security.Security.getProviders()[0].name.toLowerCase().contains("fips"));'
|
||||
boolean inFipsJvm = Boolean.parseBoolean(runJavaAsScript(project, runtimeJavaHome, inFipsJvmScript))
|
||||
boolean inFipsJvm = false
|
||||
if (new File(runtimeJavaHome).canonicalPath != gradleJavaHome.canonicalPath) {
|
||||
// We don't expect Gradle to be running in a FIPS JVM
|
||||
String inFipsJvmScript = 'print(java.security.Security.getProviders()[0].name.toLowerCase().contains("fips"));'
|
||||
inFipsJvm = Boolean.parseBoolean(runJavaAsScript(project, runtimeJavaHome, inFipsJvmScript))
|
||||
}
|
||||
|
||||
// Build debugging info
|
||||
println '======================================='
|
||||
|
@ -190,24 +190,49 @@ class BuildPlugin implements Plugin<Project> {
|
|||
throw new GradleException(message)
|
||||
}
|
||||
|
||||
for (final Map.Entry<Integer, String> javaVersionEntry : javaVersions.entrySet()) {
|
||||
final String javaHome = javaVersionEntry.getValue()
|
||||
if (javaHome == null) {
|
||||
continue
|
||||
final Map<Integer, String> javaVersions = [:]
|
||||
for (int version = 8; version <= Integer.parseInt(minimumCompilerVersion.majorVersion); version++) {
|
||||
if(System.getenv(getJavaHomeEnvVarName(version.toString())) != null) {
|
||||
javaVersions.put(version, findJavaHome(version.toString()));
|
||||
}
|
||||
JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
|
||||
final JavaVersion expectedJavaVersionEnum
|
||||
final int version = javaVersionEntry.getKey()
|
||||
if (version < 9) {
|
||||
expectedJavaVersionEnum = JavaVersion.toVersion("1." + version)
|
||||
} else {
|
||||
expectedJavaVersionEnum = JavaVersion.toVersion(Integer.toString(version))
|
||||
}
|
||||
|
||||
final int numberOfPhysicalCores = numberOfPhysicalCores(project.rootProject)
|
||||
if (javaVersions.isEmpty() == false) {
|
||||
|
||||
ExecutorService exec = Executors.newFixedThreadPool(numberOfPhysicalCores)
|
||||
Set<Future<Void>> results = new HashSet<>()
|
||||
|
||||
javaVersions.entrySet().stream()
|
||||
.filter { it.getValue() != null }
|
||||
.forEach { javaVersionEntry ->
|
||||
results.add(exec.submit {
|
||||
final String javaHome = javaVersionEntry.getValue()
|
||||
final int version = javaVersionEntry.getKey()
|
||||
if (project.file(javaHome).exists() == false) {
|
||||
throw new GradleException("Invalid JAVA${version}_HOME=${javaHome} location does not exist")
|
||||
}
|
||||
|
||||
JavaVersion javaVersionEnum = JavaVersion.toVersion(findJavaSpecificationVersion(project, javaHome))
|
||||
final JavaVersion expectedJavaVersionEnum = version < 9 ?
|
||||
JavaVersion.toVersion("1." + version) :
|
||||
JavaVersion.toVersion(Integer.toString(version))
|
||||
|
||||
if (javaVersionEnum != expectedJavaVersionEnum) {
|
||||
final String message =
|
||||
"the environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java" +
|
||||
" ${expectedJavaVersionEnum} but is [${javaHome}] corresponding to [${javaVersionEnum}]"
|
||||
throw new GradleException(message)
|
||||
}
|
||||
})
|
||||
}
|
||||
if (javaVersionEnum != expectedJavaVersionEnum) {
|
||||
final String message =
|
||||
"the environment variable JAVA" + version + "_HOME must be set to a JDK installation directory for Java" +
|
||||
" ${expectedJavaVersionEnum} but is [${javaHome}] corresponding to [${javaVersionEnum}]"
|
||||
throw new GradleException(message)
|
||||
|
||||
project.gradle.taskGraph.whenReady {
|
||||
try {
|
||||
results.forEach { it.get() }
|
||||
} finally {
|
||||
exec.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -223,7 +248,7 @@ class BuildPlugin implements Plugin<Project> {
|
|||
project.rootProject.ext.inFipsJvm = inFipsJvm
|
||||
project.rootProject.ext.gradleJavaVersion = JavaVersion.toVersion(gradleJavaVersion)
|
||||
project.rootProject.ext.java9Home = "${-> findJavaHome("9")}"
|
||||
project.rootProject.ext.defaultParallel = findDefaultParallel(project.rootProject)
|
||||
project.rootProject.ext.defaultParallel = numberOfPhysicalCores
|
||||
}
|
||||
|
||||
project.targetCompatibility = project.rootProject.ext.minimumRuntimeVersion
|
||||
|
@ -983,12 +1008,6 @@ class BuildPlugin implements Plugin<Project> {
|
|||
// TODO: remove this once ctx isn't added to update script params in 7.0
|
||||
systemProperty 'es.scripting.update.ctx_in_params', 'false'
|
||||
|
||||
// Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM
|
||||
if (project.inFipsJvm) {
|
||||
systemProperty 'javax.net.ssl.trustStorePassword', 'password'
|
||||
systemProperty 'javax.net.ssl.keyStorePassword', 'password'
|
||||
}
|
||||
|
||||
testLogging {
|
||||
showExceptions = true
|
||||
showCauses = true
|
||||
|
@ -1006,7 +1025,7 @@ class BuildPlugin implements Plugin<Project> {
|
|||
}
|
||||
}
|
||||
|
||||
private static int findDefaultParallel(Project project) {
|
||||
private static int numberOfPhysicalCores(Project project) {
|
||||
if (project.file("/proc/cpuinfo").exists()) {
|
||||
// Count physical cores on any Linux distro ( don't count hyper-threading )
|
||||
Map<String, Integer> socketToCore = [:]
|
||||
|
@ -1019,7 +1038,7 @@ class BuildPlugin implements Plugin<Project> {
|
|||
if (name == "physical id") {
|
||||
currentID = value
|
||||
}
|
||||
// Number of cores not including hyper-threading
|
||||
// number of cores not including hyper-threading
|
||||
if (name == "cpu cores") {
|
||||
assert currentID.isEmpty() == false
|
||||
socketToCore[currentID] = Integer.valueOf(value)
|
||||
|
@ -1037,8 +1056,11 @@ class BuildPlugin implements Plugin<Project> {
|
|||
standardOutput = stdout
|
||||
}
|
||||
return Integer.parseInt(stdout.toString('UTF-8').trim())
|
||||
} else {
|
||||
// guess that it is half the number of processors (which is wrong on systems that do not have simultaneous multi-threading)
|
||||
// TODO: implement this on Windows
|
||||
return Runtime.getRuntime().availableProcessors() / 2
|
||||
}
|
||||
return Runtime.getRuntime().availableProcessors() / 2
|
||||
}
|
||||
|
||||
private static configurePrecommit(Project project) {
|
||||
|
|
|
@ -412,7 +412,11 @@ class ClusterFormationTasks {
|
|||
}
|
||||
esConfig['node.max_local_storage_nodes'] = node.config.numNodes
|
||||
esConfig['http.port'] = node.config.httpPort
|
||||
esConfig['transport.tcp.port'] = node.config.transportPort
|
||||
if (node.nodeVersion.onOrAfter('6.7.0')) {
|
||||
esConfig['transport.port'] = node.config.transportPort
|
||||
} else {
|
||||
esConfig['transport.tcp.port'] = node.config.transportPort
|
||||
}
|
||||
// Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
|
||||
esConfig['cluster.routing.allocation.disk.watermark.low'] = '1b'
|
||||
esConfig['cluster.routing.allocation.disk.watermark.high'] = '1b'
|
||||
|
@ -960,6 +964,8 @@ class ClusterFormationTasks {
|
|||
}
|
||||
doLast {
|
||||
project.delete(node.pidFile)
|
||||
// Large tests can exhaust disk space, clean up on stop, but leave the data dir as some tests reuse it
|
||||
project.delete(project.fileTree(node.baseDir).minus(project.fileTree(node.dataDir)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -214,9 +214,15 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
} else {
|
||||
UPGRADE_FROM_ARCHIVES.each {
|
||||
// The version of elasticsearch that we upgrade *from*
|
||||
dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}")
|
||||
if (upgradeFromVersion.onOrAfter('6.3.0')) {
|
||||
dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}")
|
||||
if (upgradeFromVersion.onOrAfter('7.0.0')) {
|
||||
String arch = it == "rpm" ? "x86_64" : "amd64"
|
||||
dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}-${arch}@${it}")
|
||||
dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}-${arch}@${it}")
|
||||
} else {
|
||||
dependencies.add("downloads.${it}:elasticsearch:${upgradeFromVersion}@${it}")
|
||||
if (upgradeFromVersion.onOrAfter('6.3.0')) {
|
||||
dependencies.add("downloads.${it}:elasticsearch-oss:${upgradeFromVersion}@${it}")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -65,7 +65,7 @@ public class TestingConventionsTasks extends DefaultTask {
|
|||
public TestingConventionsTasks() {
|
||||
setDescription("Tests various testing conventions");
|
||||
// Run only after everything is compiled
|
||||
Boilerplate.getJavaSourceSets(getProject()).all(sourceSet -> dependsOn(sourceSet.getClassesTaskName()));
|
||||
Boilerplate.getJavaSourceSets(getProject()).all(sourceSet -> dependsOn(sourceSet.getOutput().getClassesDirs()));
|
||||
naming = getProject().container(TestingConventionRule.class);
|
||||
}
|
||||
|
||||
|
|
|
@ -605,7 +605,11 @@ public class ElasticsearchNode implements TestClusterConfiguration {
|
|||
defaultConfig.put("node.attr.testattr", "test");
|
||||
defaultConfig.put("node.portsfile", "true");
|
||||
defaultConfig.put("http.port", "0");
|
||||
defaultConfig.put("transport.tcp.port", "0");
|
||||
if (Version.fromString(version).onOrAfter(Version.fromString("6.7.0"))) {
|
||||
defaultConfig.put("transport.port", "0");
|
||||
} else {
|
||||
defaultConfig.put("transport.tcp.port", "0");
|
||||
}
|
||||
// Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space
|
||||
defaultConfig.put("cluster.routing.allocation.disk.watermark.low", "1b");
|
||||
defaultConfig.put("cluster.routing.allocation.disk.watermark.high", "1b");
|
||||
|
|
|
@ -42,11 +42,11 @@ import java.util.ArrayList;
|
|||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
@ -56,19 +56,18 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
|
||||
private static final String LIST_TASK_NAME = "listTestClusters";
|
||||
private static final String NODE_EXTENSION_NAME = "testClusters";
|
||||
static final String HELPER_CONFIGURATION_NAME = "testclusters";
|
||||
private static final String HELPER_CONFIGURATION_NAME = "testclusters";
|
||||
private static final String SYNC_ARTIFACTS_TASK_NAME = "syncTestClustersArtifacts";
|
||||
private static final int EXECUTOR_SHUTDOWN_TIMEOUT = 1;
|
||||
private static final TimeUnit EXECUTOR_SHUTDOWN_TIMEOUT_UNIT = TimeUnit.MINUTES;
|
||||
|
||||
private static final Logger logger = Logging.getLogger(TestClustersPlugin.class);
|
||||
|
||||
// this is static because we need a single mapping across multi project builds, as some of the listeners we use,
|
||||
// like task graph are singletons across multi project builds.
|
||||
private static final Map<Task, List<ElasticsearchCluster>> usedClusters = new ConcurrentHashMap<>();
|
||||
private static final Map<ElasticsearchCluster, Integer> claimsInventory = new ConcurrentHashMap<>();
|
||||
private static final Set<ElasticsearchCluster> runningClusters = Collections.synchronizedSet(new HashSet<>());
|
||||
private static volatile ExecutorService executorService;
|
||||
private final Map<Task, List<ElasticsearchCluster>> usedClusters = new HashMap<>();
|
||||
private final Map<ElasticsearchCluster, Integer> claimsInventory = new HashMap<>();
|
||||
private final Set<ElasticsearchCluster> runningClusters =new HashSet<>();
|
||||
private final Thread shutdownHook = new Thread(this::shutDownAllClusters);
|
||||
private ExecutorService executorService = Executors.newSingleThreadExecutor();
|
||||
|
||||
@Override
|
||||
public void apply(Project project) {
|
||||
|
@ -81,10 +80,8 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
createListClustersTask(project, container);
|
||||
|
||||
// create DSL for tasks to mark clusters these use
|
||||
createUseClusterTaskExtension(project);
|
||||
createUseClusterTaskExtension(project, container);
|
||||
|
||||
// There's a single Gradle instance for multi project builds, this means that some configuration needs to be
|
||||
// done only once even if the plugin is applied multiple times as a part of multi project build
|
||||
if (rootProject.getConfigurations().findByName(HELPER_CONFIGURATION_NAME) == null) {
|
||||
// We use a single configuration on the root project to resolve all testcluster dependencies ( like distros )
|
||||
// at once, only once without the need to repeat it for each project. This pays off assuming that most
|
||||
|
@ -95,18 +92,14 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
"ES distributions and plugins."
|
||||
);
|
||||
|
||||
// When running in the Daemon it's possible for this to hold references to past
|
||||
usedClusters.clear();
|
||||
claimsInventory.clear();
|
||||
runningClusters.clear();
|
||||
|
||||
// We have a single task to sync the helper configuration to "artifacts dir"
|
||||
// the clusters will look for artifacts there based on the naming conventions.
|
||||
// Tasks that use a cluster will add this as a dependency automatically so it's guaranteed to run early in
|
||||
// the build.
|
||||
rootProject.getTasks().create(SYNC_ARTIFACTS_TASK_NAME, sync -> {
|
||||
sync.getInputs().files((Callable<FileCollection>) helperConfiguration::getAsFileTree);
|
||||
sync.getOutputs().dir(getTestClustersConfigurationExtractDir(project));
|
||||
sync.getOutputs().dir(new File(project.getRootProject().getBuildDir(), "testclusters/extract"));
|
||||
// NOTE: Gradle doesn't allow a lambda here ( fails at runtime )
|
||||
sync.doLast(new Action<Task>() {
|
||||
@Override
|
||||
public void execute(Task task) {
|
||||
|
@ -121,33 +114,33 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
} else {
|
||||
throw new IllegalArgumentException("Can't extract " + file + " unknown file extension");
|
||||
}
|
||||
spec.from(files).into(getTestClustersConfigurationExtractDir(project) + "/" +
|
||||
spec.from(files).into(new File(project.getRootProject().getBuildDir(), "testclusters/extract") + "/" +
|
||||
resolvedArtifact.getModuleVersion().getId().getGroup()
|
||||
);
|
||||
}));
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// When we know what tasks will run, we claim the clusters of those task to differentiate between clusters
|
||||
// that are defined in the build script and the ones that will actually be used in this invocation of gradle
|
||||
// we use this information to determine when the last task that required the cluster executed so that we can
|
||||
// terminate the cluster right away and free up resources.
|
||||
configureClaimClustersHook(project);
|
||||
|
||||
// Before each task, we determine if a cluster needs to be started for that task.
|
||||
configureStartClustersHook(project);
|
||||
|
||||
// After each task we determine if there are clusters that are no longer needed.
|
||||
configureStopClustersHook(project);
|
||||
|
||||
// configure hooks to make sure no test cluster processes survive the build
|
||||
configureCleanupHooks(project);
|
||||
|
||||
// Since we have everything modeled in the DSL, add all the required dependencies e.x. the distribution to the
|
||||
// configuration so the user doesn't have to repeat this.
|
||||
autoConfigureClusterDependencies(project, rootProject, container);
|
||||
}
|
||||
|
||||
// When we know what tasks will run, we claim the clusters of those task to differentiate between clusters
|
||||
// that are defined in the build script and the ones that will actually be used in this invocation of gradle
|
||||
// we use this information to determine when the last task that required the cluster executed so that we can
|
||||
// terminate the cluster right away and free up resources.
|
||||
configureClaimClustersHook(project);
|
||||
|
||||
// Before each task, we determine if a cluster needs to be started for that task.
|
||||
configureStartClustersHook(project);
|
||||
|
||||
// After each task we determine if there are clusters that are no longer needed.
|
||||
configureStopClustersHook(project);
|
||||
|
||||
// configure hooks to make sure no test cluster processes survive the build
|
||||
configureCleanupHooks(project);
|
||||
|
||||
// Since we have everything modeled in the DSL, add all the required dependencies e.x. the distribution to the
|
||||
// configuration so the user doesn't have to repeat this.
|
||||
autoConfigureClusterDependencies(project, rootProject, container);
|
||||
}
|
||||
|
||||
private NamedDomainObjectContainer<ElasticsearchCluster> createTestClustersContainerExtension(Project project) {
|
||||
|
@ -158,7 +151,7 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
project.getPath(),
|
||||
name,
|
||||
project,
|
||||
getTestClustersConfigurationExtractDir(project),
|
||||
new File(project.getRootProject().getBuildDir(), "testclusters/extract"),
|
||||
new File(project.getBuildDir(), "testclusters")
|
||||
)
|
||||
);
|
||||
|
@ -178,7 +171,7 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
);
|
||||
}
|
||||
|
||||
private static void createUseClusterTaskExtension(Project project) {
|
||||
private void createUseClusterTaskExtension(Project project, NamedDomainObjectContainer<ElasticsearchCluster> container) {
|
||||
// register an extension for all current and future tasks, so that any task can declare that it wants to use a
|
||||
// specific cluster.
|
||||
project.getTasks().all((Task task) ->
|
||||
|
@ -187,6 +180,12 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
"useCluster",
|
||||
new Closure<Void>(project, task) {
|
||||
public void doCall(ElasticsearchCluster cluster) {
|
||||
if (container.contains(cluster) == false) {
|
||||
throw new TestClustersException(
|
||||
"Task " + task.getPath() + " can't use test cluster from" +
|
||||
" another project " + cluster
|
||||
);
|
||||
}
|
||||
Object thisObject = this.getThisObject();
|
||||
if (thisObject instanceof Task == false) {
|
||||
throw new AssertionError("Expected " + thisObject + " to be an instance of " +
|
||||
|
@ -201,35 +200,38 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
);
|
||||
}
|
||||
|
||||
private static void configureClaimClustersHook(Project project) {
|
||||
project.getGradle().getTaskGraph().whenReady(taskExecutionGraph ->
|
||||
taskExecutionGraph.getAllTasks()
|
||||
.forEach(task ->
|
||||
usedClusters.getOrDefault(task, Collections.emptyList()).forEach(each -> {
|
||||
synchronized (claimsInventory) {
|
||||
claimsInventory.put(each, claimsInventory.getOrDefault(each, 0) + 1);
|
||||
}
|
||||
each.freeze();
|
||||
})
|
||||
)
|
||||
);
|
||||
private void configureClaimClustersHook(Project project) {
|
||||
// Once we know all the tasks that need to execute, we claim all the clusters that belong to those and count the
|
||||
// claims so we'll know when it's safe to stop them.
|
||||
project.getGradle().getTaskGraph().whenReady(taskExecutionGraph -> {
|
||||
Set<String> forExecution = taskExecutionGraph.getAllTasks().stream()
|
||||
.map(Task::getPath)
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
usedClusters.forEach((task, listOfClusters) ->
|
||||
listOfClusters.forEach(elasticsearchCluster -> {
|
||||
if (forExecution.contains(task.getPath())) {
|
||||
elasticsearchCluster.freeze();
|
||||
claimsInventory.put(elasticsearchCluster, claimsInventory.getOrDefault(elasticsearchCluster, 0) + 1);
|
||||
}
|
||||
}));
|
||||
|
||||
logger.info("Claims inventory: {}", claimsInventory);
|
||||
});
|
||||
}
|
||||
|
||||
private static void configureStartClustersHook(Project project) {
|
||||
private void configureStartClustersHook(Project project) {
|
||||
project.getGradle().addListener(
|
||||
new TaskActionListener() {
|
||||
@Override
|
||||
public void beforeActions(Task task) {
|
||||
// we only start the cluster before the actions, so we'll not start it if the task is up-to-date
|
||||
final List<ElasticsearchCluster> clustersToStart;
|
||||
synchronized (runningClusters) {
|
||||
clustersToStart = usedClusters.getOrDefault(task,Collections.emptyList()).stream()
|
||||
.filter(each -> runningClusters.contains(each) == false)
|
||||
.collect(Collectors.toList());
|
||||
runningClusters.addAll(clustersToStart);
|
||||
}
|
||||
clustersToStart.forEach(ElasticsearchCluster::start);
|
||||
|
||||
usedClusters.getOrDefault(task, Collections.emptyList()).stream()
|
||||
.filter(each -> runningClusters.contains(each) == false)
|
||||
.forEach(elasticsearchCluster -> {
|
||||
elasticsearchCluster.start();
|
||||
runningClusters.add(elasticsearchCluster);
|
||||
});
|
||||
}
|
||||
@Override
|
||||
public void afterActions(Task task) {}
|
||||
|
@ -237,7 +239,7 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
);
|
||||
}
|
||||
|
||||
private static void configureStopClustersHook(Project project) {
|
||||
private void configureStopClustersHook(Project project) {
|
||||
project.getGradle().addListener(
|
||||
new TaskExecutionListener() {
|
||||
@Override
|
||||
|
@ -251,25 +253,19 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
if (state.getFailure() != null) {
|
||||
// If the task fails, and other tasks use this cluster, the other task will likely never be
|
||||
// executed at all, so we will never get to un-claim and terminate it.
|
||||
// The downside is that with multi project builds if that other task is in a different
|
||||
// project and executing right now, we may terminate the cluster while it's running it.
|
||||
clustersUsedByTask.forEach(each -> each.stop(true));
|
||||
} else {
|
||||
clustersUsedByTask.forEach(each -> {
|
||||
synchronized (claimsInventory) {
|
||||
claimsInventory.put(each, claimsInventory.get(each) - 1);
|
||||
}
|
||||
});
|
||||
final List<ElasticsearchCluster> stoppable;
|
||||
synchronized (runningClusters) {
|
||||
stoppable = claimsInventory.entrySet().stream()
|
||||
.filter(entry -> entry.getValue() == 0)
|
||||
.filter(entry -> runningClusters.contains(entry.getKey()))
|
||||
.map(Map.Entry::getKey)
|
||||
.collect(Collectors.toList());
|
||||
runningClusters.removeAll(stoppable);
|
||||
}
|
||||
stoppable.forEach(each -> each.stop(false));
|
||||
clustersUsedByTask.forEach(
|
||||
each -> claimsInventory.put(each, claimsInventory.getOrDefault(each, 0) - 1)
|
||||
);
|
||||
claimsInventory.entrySet().stream()
|
||||
.filter(entry -> entry.getValue() == 0)
|
||||
.filter(entry -> runningClusters.contains(entry.getKey()))
|
||||
.map(Map.Entry::getKey)
|
||||
.forEach(each -> {
|
||||
each.stop(false);
|
||||
runningClusters.remove(each);
|
||||
});
|
||||
}
|
||||
}
|
||||
@Override
|
||||
|
@ -278,10 +274,6 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
);
|
||||
}
|
||||
|
||||
static File getTestClustersConfigurationExtractDir(Project project) {
|
||||
return new File(project.getRootProject().getBuildDir(), "testclusters/extract");
|
||||
}
|
||||
|
||||
/**
|
||||
* Boilerplate to get testClusters container extension
|
||||
*
|
||||
|
@ -354,15 +346,9 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
})));
|
||||
}
|
||||
|
||||
private static void configureCleanupHooks(Project project) {
|
||||
synchronized (runningClusters) {
|
||||
if (executorService == null || executorService.isTerminated()) {
|
||||
executorService = Executors.newSingleThreadExecutor();
|
||||
} else {
|
||||
throw new IllegalStateException("Trying to configure executor service twice");
|
||||
}
|
||||
}
|
||||
private void configureCleanupHooks(Project project) {
|
||||
// When the Gradle daemon is used, it will interrupt all threads when the build concludes.
|
||||
// This is our signal to clean up
|
||||
executorService.submit(() -> {
|
||||
while (true) {
|
||||
try {
|
||||
|
@ -375,17 +361,21 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
}
|
||||
});
|
||||
|
||||
project.getGradle().buildFinished(buildResult -> {
|
||||
logger.info("Build finished");
|
||||
shutdownExecutorService();
|
||||
});
|
||||
// When the Daemon is not used, or runs into issues, rely on a shutdown hook
|
||||
// When the daemon is used, but does not work correctly and eventually dies off (e.x. due to non interruptible
|
||||
// thread in the build) process will be stopped eventually when the daemon dies.
|
||||
Runtime.getRuntime().addShutdownHook(new Thread(TestClustersPlugin::shutDownAllClusters));
|
||||
Runtime.getRuntime().addShutdownHook(shutdownHook);
|
||||
|
||||
// When we don't run into anything out of the ordinary, and the build completes, makes sure to clean up
|
||||
project.getGradle().buildFinished(buildResult -> {
|
||||
shutdownExecutorService();
|
||||
if (false == Runtime.getRuntime().removeShutdownHook(shutdownHook)) {
|
||||
logger.info("Trying to deregister shutdown hook when it was not registered.");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private static void shutdownExecutorService() {
|
||||
private void shutdownExecutorService() {
|
||||
executorService.shutdownNow();
|
||||
try {
|
||||
if (executorService.awaitTermination(EXECUTOR_SHUTDOWN_TIMEOUT, EXECUTOR_SHUTDOWN_TIMEOUT_UNIT) == false) {
|
||||
|
@ -400,13 +390,13 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
}
|
||||
}
|
||||
|
||||
private static void shutDownAllClusters() {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Shutting down all test clusters", new RuntimeException());
|
||||
}
|
||||
private void shutDownAllClusters() {
|
||||
synchronized (runningClusters) {
|
||||
runningClusters.forEach(each -> each.stop(true));
|
||||
runningClusters.clear();
|
||||
Iterator<ElasticsearchCluster> iterator = runningClusters.iterator();
|
||||
while (iterator.hasNext()) {
|
||||
iterator.remove();
|
||||
iterator.next().stop(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -30,32 +30,9 @@ public abstract class SingleGroupSource implements ToXContentObject {
|
|||
protected static final ParseField FIELD = new ParseField("field");
|
||||
|
||||
public enum Type {
|
||||
TERMS(0),
|
||||
HISTOGRAM(1),
|
||||
DATE_HISTOGRAM(2);
|
||||
|
||||
private final byte id;
|
||||
|
||||
Type(int id) {
|
||||
this.id = (byte) id;
|
||||
}
|
||||
|
||||
public byte getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public static Type fromId(byte id) {
|
||||
switch (id) {
|
||||
case 0:
|
||||
return TERMS;
|
||||
case 1:
|
||||
return HISTOGRAM;
|
||||
case 2:
|
||||
return DATE_HISTOGRAM;
|
||||
default:
|
||||
throw new IllegalArgumentException("unknown type");
|
||||
}
|
||||
}
|
||||
TERMS,
|
||||
HISTOGRAM,
|
||||
DATE_HISTOGRAM;
|
||||
|
||||
public String value() {
|
||||
return name().toLowerCase(Locale.ROOT);
|
||||
|
|
|
@ -133,9 +133,7 @@ public class GraphExploreResponse implements ToXContentObject {
|
|||
builder.startArray(FAILURES.getPreferredName());
|
||||
if (shardFailures != null) {
|
||||
for (ShardOperationFailedException shardFailure : shardFailures) {
|
||||
builder.startObject();
|
||||
shardFailure.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
}
|
||||
}
|
||||
builder.endArray();
|
||||
|
|
|
@ -31,7 +31,7 @@ import org.elasticsearch.common.bytes.BytesReference;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.DeprecationHandler;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
|
@ -56,7 +56,7 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
|
|||
/**
|
||||
* A request to create an index template.
|
||||
*/
|
||||
public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateRequest> implements IndicesRequest, ToXContent {
|
||||
public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateRequest> implements IndicesRequest, ToXContentFragment {
|
||||
|
||||
private String name;
|
||||
|
||||
|
@ -191,7 +191,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
|
|||
public Settings settings() {
|
||||
return this.settings;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Adds mapping that will be added when the index gets created.
|
||||
*
|
||||
|
@ -201,7 +201,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
|
|||
public PutIndexTemplateRequest mapping(String source, XContentType xContentType) {
|
||||
internalMapping(XContentHelper.convertToMap(new BytesArray(source), true, xContentType).v2());
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The cause for this index template creation.
|
||||
|
@ -221,11 +221,11 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
|
|||
* @param source The mapping source
|
||||
*/
|
||||
public PutIndexTemplateRequest mapping(XContentBuilder source) {
|
||||
internalMapping(XContentHelper.convertToMap(BytesReference.bytes(source),
|
||||
internalMapping(XContentHelper.convertToMap(BytesReference.bytes(source),
|
||||
true, source.contentType()).v2());
|
||||
return this;
|
||||
}
|
||||
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds mapping that will be added when the index gets created.
|
||||
*
|
||||
|
@ -235,8 +235,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
|
|||
public PutIndexTemplateRequest mapping(BytesReference source, XContentType xContentType) {
|
||||
internalMapping(XContentHelper.convertToMap(source, true, xContentType).v2());
|
||||
return this;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds mapping that will be added when the index gets created.
|
||||
*
|
||||
|
@ -244,7 +244,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
|
|||
*/
|
||||
public PutIndexTemplateRequest mapping(Map<String, Object> source) {
|
||||
return internalMapping(source);
|
||||
}
|
||||
}
|
||||
|
||||
private PutIndexTemplateRequest internalMapping(Map<String, Object> source) {
|
||||
try {
|
||||
|
@ -257,12 +257,12 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
|
|||
return this;
|
||||
} catch (IOException e) {
|
||||
throw new UncheckedIOException("failed to convert source to json", e);
|
||||
}
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public BytesReference mappings() {
|
||||
return this.mappings;
|
||||
}
|
||||
|
@ -349,8 +349,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
|
|||
*/
|
||||
public PutIndexTemplateRequest source(BytesReference source, XContentType xContentType) {
|
||||
return source(XContentHelper.convertToMap(source, true, xContentType).v2());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
public Set<Alias> aliases() {
|
||||
return this.aliases;
|
||||
|
@ -441,7 +441,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
|
|||
builder.copyCurrentStructure(parser);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
builder.startObject("aliases");
|
||||
for (Alias alias : aliases) {
|
||||
alias.toXContent(builder, params);
|
||||
|
|
|
@ -25,7 +25,7 @@ import org.elasticsearch.common.ParseField;
|
|||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -34,7 +34,7 @@ import java.util.List;
|
|||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
|
||||
public class FindFileStructureRequest implements Validatable, ToXContent {
|
||||
public class FindFileStructureRequest implements Validatable, ToXContentFragment {
|
||||
|
||||
public static final ParseField LINES_TO_SAMPLE = new ParseField("lines_to_sample");
|
||||
public static final ParseField TIMEOUT = new ParseField("timeout");
|
||||
|
|
|
@ -56,6 +56,7 @@ public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase {
|
|||
executeBulkRejectionLoad(BackoffPolicy.noBackoff(), rejectedExecutionExpected);
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41324")
|
||||
public void testBulkRejectionLoadWithBackoff() throws Throwable {
|
||||
boolean rejectedExecutionExpected = false;
|
||||
executeBulkRejectionLoad(BackoffPolicy.exponentialBackoff(), rejectedExecutionExpected);
|
||||
|
|
|
@ -915,7 +915,6 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
|
|||
return forecastJobResponse.getForecastId();
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/41070")
|
||||
public void testDeleteExpiredData() throws Exception {
|
||||
|
||||
String jobId = "test-delete-expired-data";
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.client;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonParseException;
|
||||
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.HttpResponse;
|
||||
|
@ -61,6 +62,7 @@ import org.elasticsearch.common.collect.Tuple;
|
|||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.cbor.CborXContent;
|
||||
|
@ -176,7 +178,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
MainResponse testInfo = new MainResponse("nodeName", new MainResponse.Version("number", "buildFlavor", "buildType", "buildHash",
|
||||
"buildDate", true, "luceneVersion", "minimumWireCompatibilityVersion", "minimumIndexCompatibilityVersion"),
|
||||
"clusterName", "clusterUuid", "You Know, for Search");
|
||||
mockResponse((builder, params) -> {
|
||||
mockResponse((ToXContentFragment) (builder, params) -> {
|
||||
// taken from the server side MainResponse
|
||||
builder.field("name", testInfo.getNodeName());
|
||||
builder.field("cluster_name", testInfo.getClusterName());
|
||||
|
@ -762,12 +764,12 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
Collectors.mapping(Tuple::v2, Collectors.toSet())));
|
||||
|
||||
// TODO remove in 8.0 - we will undeprecate indices.get_template because the current getIndexTemplate
|
||||
// impl will replace the existing getTemplate method.
|
||||
// impl will replace the existing getTemplate method.
|
||||
// The above general-purpose code ignores all deprecated methods which in this case leaves `getTemplate`
|
||||
// looking like it doesn't have a valid implementatation when it does.
|
||||
// looking like it doesn't have a valid implementatation when it does.
|
||||
apiUnsupported.remove("indices.get_template");
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
for (Map.Entry<String, Set<Method>> entry : methods.entrySet()) {
|
||||
String apiName = entry.getKey();
|
||||
|
@ -830,7 +832,7 @@ public class RestHighLevelClientTests extends ESTestCase {
|
|||
assertThat("the return type for method [" + method + "] is incorrect",
|
||||
method.getReturnType().getSimpleName(), equalTo("boolean"));
|
||||
} else {
|
||||
// It's acceptable for 404s to be represented as empty Optionals
|
||||
// It's acceptable for 404s to be represented as empty Optionals
|
||||
if (!method.getReturnType().isAssignableFrom(Optional.class)) {
|
||||
assertThat("the return type for method [" + method + "] is incorrect",
|
||||
method.getReturnType().getSimpleName(), endsWith("Response"));
|
||||
|
|
|
@ -113,25 +113,25 @@ task buildIntegTestZip(type: Zip) {
|
|||
task buildWindowsZip(type: Zip) {
|
||||
configure(commonZipConfig)
|
||||
archiveClassifier = 'windows-x86_64'
|
||||
with archiveFiles(modulesFiles(false), 'zip', 'windows', false, true)
|
||||
with archiveFiles(modulesFiles(false, 'windows'), 'zip', 'windows', false, true)
|
||||
}
|
||||
|
||||
task buildOssWindowsZip(type: Zip) {
|
||||
configure(commonZipConfig)
|
||||
archiveClassifier = 'windows-x86_64'
|
||||
with archiveFiles(modulesFiles(true), 'zip', 'windows', true, true)
|
||||
with archiveFiles(modulesFiles(true, 'windows'), 'zip', 'windows', true, true)
|
||||
}
|
||||
|
||||
task buildNoJdkWindowsZip(type: Zip) {
|
||||
configure(commonZipConfig)
|
||||
archiveClassifier = 'no-jdk-windows-x86_64'
|
||||
with archiveFiles(modulesFiles(false), 'zip', 'windows', false, false)
|
||||
with archiveFiles(modulesFiles(false, 'windows'), 'zip', 'windows', false, false)
|
||||
}
|
||||
|
||||
task buildOssNoJdkWindowsZip(type: Zip) {
|
||||
configure(commonZipConfig)
|
||||
archiveClassifier = 'no-jdk-windows-x86_64'
|
||||
with archiveFiles(modulesFiles(true), 'zip', 'windows', true, false)
|
||||
with archiveFiles(modulesFiles(true, 'windows'), 'zip', 'windows', true, false)
|
||||
}
|
||||
|
||||
Closure commonTarConfig = {
|
||||
|
@ -144,49 +144,49 @@ Closure commonTarConfig = {
|
|||
task buildDarwinTar(type: Tar) {
|
||||
configure(commonTarConfig)
|
||||
archiveClassifier = 'darwin-x86_64'
|
||||
with archiveFiles(modulesFiles(false), 'tar', 'darwin', false, true)
|
||||
with archiveFiles(modulesFiles(false, 'darwin'), 'tar', 'darwin', false, true)
|
||||
}
|
||||
|
||||
task buildOssDarwinTar(type: Tar) {
|
||||
configure(commonTarConfig)
|
||||
archiveClassifier = 'darwin-x86_64'
|
||||
with archiveFiles(modulesFiles(true), 'tar', 'darwin', true, true)
|
||||
with archiveFiles(modulesFiles(true, 'darwin'), 'tar', 'darwin', true, true)
|
||||
}
|
||||
|
||||
task buildNoJdkDarwinTar(type: Tar) {
|
||||
configure(commonTarConfig)
|
||||
archiveClassifier = 'no-jdk-darwin-x86_64'
|
||||
with archiveFiles(modulesFiles(false), 'tar', 'darwin', false, false)
|
||||
with archiveFiles(modulesFiles(false, 'darwin'), 'tar', 'darwin', false, false)
|
||||
}
|
||||
|
||||
task buildOssNoJdkDarwinTar(type: Tar) {
|
||||
configure(commonTarConfig)
|
||||
archiveClassifier = 'no-jdk-darwin-x86_64'
|
||||
with archiveFiles(modulesFiles(true), 'tar', 'darwin', true, false)
|
||||
with archiveFiles(modulesFiles(true, 'darwin'), 'tar', 'darwin', true, false)
|
||||
}
|
||||
|
||||
task buildLinuxTar(type: Tar) {
|
||||
configure(commonTarConfig)
|
||||
archiveClassifier = 'linux-x86_64'
|
||||
with archiveFiles(modulesFiles(false), 'tar', 'linux', false, true)
|
||||
with archiveFiles(modulesFiles(false, 'linux'), 'tar', 'linux', false, true)
|
||||
}
|
||||
|
||||
task buildOssLinuxTar(type: Tar) {
|
||||
configure(commonTarConfig)
|
||||
archiveClassifier = 'linux-x86_64'
|
||||
with archiveFiles(modulesFiles(true), 'tar', 'linux', true, true)
|
||||
with archiveFiles(modulesFiles(true, 'linux'), 'tar', 'linux', true, true)
|
||||
}
|
||||
|
||||
task buildNoJdkLinuxTar(type: Tar) {
|
||||
configure(commonTarConfig)
|
||||
archiveClassifier = 'no-jdk-linux-x86_64'
|
||||
with archiveFiles(modulesFiles(false), 'tar', 'linux', false, false)
|
||||
with archiveFiles(modulesFiles(false, 'linux'), 'tar', 'linux', false, false)
|
||||
}
|
||||
|
||||
task buildOssNoJdkLinuxTar(type: Tar) {
|
||||
configure(commonTarConfig)
|
||||
archiveClassifier = 'no-jdk-linux-x86_64'
|
||||
with archiveFiles(modulesFiles(true), 'tar', 'linux', true, false)
|
||||
with archiveFiles(modulesFiles(true, 'linux'), 'tar', 'linux', true, false)
|
||||
}
|
||||
|
||||
Closure tarExists = { it -> new File('/bin/tar').exists() || new File('/usr/bin/tar').exists() || new File('/usr/local/bin/tar').exists() }
|
||||
|
|
|
@ -305,7 +305,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
|
|||
}
|
||||
}
|
||||
|
||||
modulesFiles = { oss ->
|
||||
modulesFiles = { oss, platform ->
|
||||
copySpec {
|
||||
eachFile {
|
||||
if (it.relativePath.segments[-2] == 'bin') {
|
||||
|
@ -315,10 +315,22 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
|
|||
it.mode = 0644
|
||||
}
|
||||
}
|
||||
Task buildModules
|
||||
if (oss) {
|
||||
from project(':distribution').buildOssModules
|
||||
buildModules = project(':distribution').buildOssModules
|
||||
} else {
|
||||
from project(':distribution').buildDefaultModules
|
||||
buildModules = project(':distribution').buildDefaultModules
|
||||
}
|
||||
List excludePlatforms = ['linux', 'windows', 'darwin']
|
||||
if (platform != null) {
|
||||
excludePlatforms.remove(excludePlatforms.indexOf(platform))
|
||||
} else {
|
||||
excludePlatforms = []
|
||||
}
|
||||
from(buildModules) {
|
||||
for (String excludePlatform : excludePlatforms) {
|
||||
exclude "**/platform/${excludePlatform}-x86_64/**"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -90,10 +90,7 @@ void addCopyDockerfileTask(final boolean oss) {
|
|||
}
|
||||
|
||||
preProcessFixture {
|
||||
dependsOn taskName("copy", true, "DockerContext")
|
||||
dependsOn taskName("copy", true, "Dockerfile")
|
||||
dependsOn taskName("copy", false, "DockerContext")
|
||||
dependsOn taskName("copy", false, "Dockerfile")
|
||||
dependsOn assemble
|
||||
}
|
||||
|
||||
postProcessFixture.doLast {
|
||||
|
@ -110,12 +107,16 @@ void addBuildDockerImage(final boolean oss) {
|
|||
dependsOn taskName("copy", oss, "Dockerfile")
|
||||
List<String> tags
|
||||
if (oss) {
|
||||
tags = [ "docker.elastic.co/elasticsearch/elasticsearch-oss:${VersionProperties.elasticsearch}" ]
|
||||
tags = [
|
||||
"docker.elastic.co/elasticsearch/elasticsearch-oss:${VersionProperties.elasticsearch}",
|
||||
"elasticsearch-oss:test"
|
||||
]
|
||||
} else {
|
||||
tags = [
|
||||
"elasticsearch:${VersionProperties.elasticsearch}",
|
||||
"docker.elastic.co/elasticsearch/elasticsearch:${VersionProperties.elasticsearch}",
|
||||
"docker.elastic.co/elasticsearch/elasticsearch-full:${VersionProperties.elasticsearch}"
|
||||
"docker.elastic.co/elasticsearch/elasticsearch-full:${VersionProperties.elasticsearch}",
|
||||
"elasticsearch:test",
|
||||
]
|
||||
}
|
||||
executable 'docker'
|
||||
|
@ -137,3 +138,8 @@ for (final boolean oss : [false, true]) {
|
|||
|
||||
assemble.dependsOn "buildOssDockerImage"
|
||||
assemble.dependsOn "buildDockerImage"
|
||||
|
||||
// We build the images used in compose locally, but the pull command insists on using a repository
|
||||
// thus we must disable it to prevent it from doing so.
|
||||
// Everything will still be pulled since we will build the local images on a pull
|
||||
tasks.matching { name == "composePull" }.all { enabled = false }
|
||||
|
|
|
@ -2,18 +2,14 @@
|
|||
version: '3'
|
||||
services:
|
||||
elasticsearch-default:
|
||||
build:
|
||||
context: ./build/docker
|
||||
dockerfile: Dockerfile
|
||||
image: elasticsearch:test
|
||||
environment:
|
||||
- cluster.name=elasticsearch-default
|
||||
- discovery.type=single-node
|
||||
ports:
|
||||
- "9200"
|
||||
elasticsearch-oss:
|
||||
build:
|
||||
context: ./build/oss-docker
|
||||
dockerfile: Dockerfile
|
||||
image: elasticsearch-oss:test
|
||||
environment:
|
||||
- cluster.name=elasticsearch-oss
|
||||
- discovery.type=single-node
|
||||
|
|
|
@ -139,7 +139,7 @@ Closure commonPackageConfig(String type, boolean oss, boolean jdk) {
|
|||
with libFiles(oss)
|
||||
}
|
||||
into('modules') {
|
||||
with modulesFiles(oss)
|
||||
with modulesFiles(oss, 'linux')
|
||||
}
|
||||
if (jdk) {
|
||||
into('jdk') {
|
||||
|
|
|
@ -1,4 +1,8 @@
|
|||
:version: 7.1.0
|
||||
////
|
||||
bare_version never includes -alpha or -beta
|
||||
////
|
||||
:bare_version: 7.1.0
|
||||
:major-version: 7.x
|
||||
:prev-major-version: 6.x
|
||||
:lucene_version: 8.0.0
|
||||
|
@ -42,6 +46,7 @@ ifeval::["{release-state}"=="unreleased"]
|
|||
:percolator-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/percolator-client/{version}-SNAPSHOT
|
||||
:matrixstats-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/aggs-matrix-stats-client/{version}-SNAPSHOT
|
||||
:rank-eval-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/rank-eval-client/{version}-SNAPSHOT
|
||||
:version_qualified: {bare_version}-SNAPSHOT
|
||||
endif::[]
|
||||
|
||||
ifeval::["{release-state}"!="unreleased"]
|
||||
|
@ -55,6 +60,7 @@ ifeval::["{release-state}"!="unreleased"]
|
|||
:percolator-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/percolator-client/{version}
|
||||
:matrixstats-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/aggs-matrix-stats-client/{version}
|
||||
:rank-eval-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/rank-eval-client/{version}
|
||||
:version_qualified: {bare_version}
|
||||
endif::[]
|
||||
|
||||
:javadoc-client: {rest-high-level-client-javadoc}/org/elasticsearch/client
|
||||
|
|
|
@ -12,7 +12,9 @@ It accepts a +{request}+ object and responds with a +{response}+ object.
|
|||
[id="{upid}-{api}-request"]
|
||||
==== Stop Data Frame Request
|
||||
|
||||
A +{request}+ object requires a non-null `id`.
|
||||
A +{request}+ object requires a non-null `id`. `id` can be a comma separated list of Ids
|
||||
or a single Id. Wildcards, `*` and `_all` are also accepted.
|
||||
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
---------------------------------------------------
|
||||
|
|
|
@ -413,6 +413,14 @@ The following parameters are accepted by `icu_collation_keyword` fields:
|
|||
Accepts a string value which is substituted for any explicit `null`
|
||||
values. Defaults to `null`, which means the field is treated as missing.
|
||||
|
||||
{ref}/ignore-above.html[`ignore_above`]::
|
||||
|
||||
Strings longer than the `ignore_above` setting will be ignored.
|
||||
Checking is performed on the original string before the collation.
|
||||
The `ignore_above` setting can be updated on existing fields
|
||||
using the {ref}/indices-put-mapping.html[PUT mapping API].
|
||||
By default, there is no limit and all values will be indexed.
|
||||
|
||||
`store`::
|
||||
|
||||
Whether the field value should be stored and retrievable separately from
|
||||
|
|
|
@ -245,7 +245,8 @@ include::repository-shared-settings.asciidoc[]
|
|||
|
||||
`application_name`::
|
||||
|
||||
deprecated[7.0.0, This setting is now defined in the <<repository-gcs-client, client settings>>]
|
||||
deprecated:[6.3.0, "This setting is now defined in the <<repository-gcs-client, client settings>>."]
|
||||
Name used by the client when it uses the Google Cloud Storage service.
|
||||
|
||||
[[repository-gcs-bucket-permission]]
|
||||
===== Recommended Bucket Permission
|
||||
|
|
|
@ -94,10 +94,10 @@ settings belong in the `elasticsearch.yml` file.
|
|||
|
||||
An S3 secret key. The `access_key` setting must also be specified.
|
||||
|
||||
`session_token`::
|
||||
`session_token` ({ref}/secure-settings.html[Secure])::
|
||||
|
||||
An S3 session token. The `access_key` and `secret_key` settings must also be
|
||||
specified. (Secure)
|
||||
specified.
|
||||
|
||||
`endpoint`::
|
||||
|
||||
|
|
|
@ -89,7 +89,7 @@ Response:
|
|||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
|
||||
|
||||
=== Intervals
|
||||
==== Intervals
|
||||
|
||||
The interval of the returned buckets is selected based on the data collected by the
|
||||
aggregation so that the number of buckets returned is less than or equal to the number
|
||||
|
|
|
@ -132,7 +132,7 @@ The `standard` analyzer accepts the following parameters:
|
|||
`stopwords`::
|
||||
|
||||
A pre-defined stop words list like `_english_` or an array containing a
|
||||
list of stop words. Defaults to `\_none_`.
|
||||
list of stop words. Defaults to `_none_`.
|
||||
|
||||
`stopwords_path`::
|
||||
|
||||
|
|
|
@ -78,4 +78,4 @@ Elasticsearch provides the following predefined list of languages:
|
|||
`_portuguese_`, `_romanian_`, `_russian_`, `_sorani_`, `_spanish_`,
|
||||
`_swedish_`, `_thai_`, `_turkish_`.
|
||||
|
||||
For the empty stopwords list (to disable stopwords) use: `\_none_`.
|
||||
For the empty stopwords list (to disable stopwords) use: `_none_`.
|
||||
|
|
|
@ -10,9 +10,17 @@ Stops one or more {dataframe-transforms}.
|
|||
|
||||
==== Request
|
||||
|
||||
`POST _data_frame/transforms/<data_frame_transform_id>/_stop`
|
||||
`POST _data_frame/transforms/<data_frame_transform_id>/_stop` +
|
||||
|
||||
//==== Description
|
||||
`POST _data_frame/transforms/<data_frame_transform_id1>,<data_frame_transform_id2>/_stop` +
|
||||
|
||||
`POST _data_frame/transforms/_all/_stop`
|
||||
|
||||
|
||||
==== Description
|
||||
You can stop multiple {dataframe-transforms} in a single API request by using a
|
||||
comma-separated list of {dataframe-transforms} or a wildcard expression.
|
||||
All {dataframe-transforms} can be stopped by using `_all` or `*` as the `<data_frame_transform_id>`.
|
||||
|
||||
==== Path Parameters
|
||||
|
||||
|
|
|
@ -132,7 +132,7 @@ more primary shards to scale the number of <<glossary-document,documents>>
|
|||
that your index can handle.
|
||||
+
|
||||
You cannot change the number of primary shards in an index, once the index is
|
||||
index is created. However, an index can be split into a new index using the
|
||||
created. However, an index can be split into a new index using the
|
||||
<<indices-split-index, split API>>.
|
||||
+
|
||||
See also <<glossary-routing,routing>>
|
||||
|
|
|
@ -45,26 +45,17 @@ specific index module:
|
|||
part of the cluster.
|
||||
|
||||
`index.shard.check_on_startup`::
|
||||
+
|
||||
--
|
||||
|
||||
Whether or not shards should be checked for corruption before opening. When
|
||||
corruption is detected, it will prevent the shard from being opened. Accepts:
|
||||
|
||||
`false`::
|
||||
|
||||
(default) Don't check for corruption when opening a shard.
|
||||
|
||||
`checksum`::
|
||||
|
||||
Check for physical corruption.
|
||||
|
||||
`true`::
|
||||
|
||||
Check for both physical and logical corruption. This is much more
|
||||
expensive in terms of CPU and memory usage.
|
||||
|
||||
WARNING: Expert only. Checking shards may take a lot of time on large indices.
|
||||
--
|
||||
corruption is detected, it will prevent the shard from being opened.
|
||||
Accepts:
|
||||
`false`::: (default) Don't check for corruption when opening a shard.
|
||||
`checksum`::: Check for physical corruption.
|
||||
`true`::: Check for both physical and logical corruption. This is much more
|
||||
expensive in terms of CPU and memory usage.
|
||||
+
|
||||
WARNING: Expert only. Checking shards may take a lot of time on large
|
||||
indices.
|
||||
|
||||
[[index-codec]] `index.codec`::
|
||||
|
||||
|
|
|
@ -92,7 +92,7 @@ PUT my_source_index
|
|||
"index.number_of_shards" : 1
|
||||
}
|
||||
}
|
||||
-------------------------------------------------
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
In order to split an index, the index must be marked as read-only,
|
||||
@ -7,11 +7,11 @@ you are using Elasticsearch as a web session store. You may want to index the

session ID and last update time, but you don't need to query or run
aggregations on the session data itself.

The `enabled` setting, which can be applied only to the mapping type and to
<<object,`object`>> fields, causes Elasticsearch to skip parsing of the
contents of the field entirely. The JSON can still be retrieved from the
<<mapping-source-field,`_source`>> field, but it is not searchable or stored
in any other way:
The `enabled` setting, which can be applied only to the top-level mapping
definition and to <<object,`object`>> fields, causes Elasticsearch to skip
parsing of the contents of the field entirely. The JSON can still be retrieved
from the <<mapping-source-field,`_source`>> field, but it is not searchable or
stored in any other way:

[source,js]
--------------------------------------------------

@ -26,6 +26,7 @@ PUT my_index

"type": "date"
},
"session_data": { <1>
"type": "object",
"enabled": false
}
}

@ -55,7 +56,7 @@ PUT my_index/_doc/session_2

<2> Any arbitrary data can be passed to the `session_data` field as it will be entirely ignored.
<3> The `session_data` will also ignore values that are not JSON objects.

The entire mapping type may be disabled as well, in which case the document is
The entire mapping may be disabled as well, in which case the document is
stored in the <<mapping-source-field,`_source`>> field, which means it can be
retrieved, but none of its contents are indexed in any way:

@ -84,10 +85,34 @@ GET my_index/_doc/session_1 <2>

GET my_index/_mapping <3>
--------------------------------------------------
// CONSOLE
<1> The entire mapping type is disabled.
<1> The entire mapping is disabled.
<2> The document can be retrieved.
<3> Checking the mapping reveals that no fields have been added.

TIP: The `enabled` setting can be updated on existing fields
using the <<indices-put-mapping,PUT mapping API>>.

Note that because Elasticsearch completely skips parsing the field
contents, it is possible to add non-object data to a disabled field:
[source,js]
--------------------------------------------------
PUT my_index
{
"mappings": {
"properties": {
"session_data": {
"type": "object",
"enabled": false
}
}
}
}

PUT my_index/_doc/session_1
{
"session_data": "foo bar" <1>
}
--------------------------------------------------
// CONSOLE

<1> The document is added successfully, even though `session_data` contains non-object data.

@ -16,4 +16,54 @@ coming[7.1.0]

//tag::notable-breaking-changes[]

// end::notable-breaking-changes[]
// end::notable-breaking-changes[]

[float]
[[breaking_71_http_changes]]
=== HTTP changes

[float]
==== Deprecation of old HTTP settings

The `http.tcp_no_delay` setting is deprecated in 7.1. It is replaced by
`http.tcp.no_delay`.

[float]
[[breaking_71_network_changes]]
=== Network changes

[float]
==== Deprecation of old network settings

The `network.tcp.connect_timeout` setting is deprecated in 7.1. This setting
was a fallback setting for `transport.connect_timeout`. To change the default
connection timeout for client connections, modify `transport.connect_timeout`.

[float]
[[breaking_71_transport_changes]]
=== Transport changes

//tag::notable-breaking-changes[]
[float]
==== Deprecation of old transport settings

The following settings are deprecated in 7.1. Each setting has a replacement
setting that was introduced in 6.7.

- `transport.tcp.port` is replaced by `transport.port`
- `transport.tcp.compress` is replaced by `transport.compress`
- `transport.tcp.connect_timeout` is replaced by `transport.connect_timeout`
- `transport.tcp_no_delay` is replaced by `transport.tcp.no_delay`
- `transport.profiles.profile_name.tcp_no_delay` is replaced by
`transport.profiles.profile_name.tcp.no_delay`
- `transport.profiles.profile_name.tcp_keep_alive` is replaced by
`transport.profiles.profile_name.tcp.keep_alive`
- `transport.profiles.profile_name.reuse_address` is replaced by
`transport.profiles.profile_name.tcp.reuse_address`
- `transport.profiles.profile_name.send_buffer_size` is replaced by `transport.profiles.profile_name.tcp.send_buffer_size`
- `transport.profiles.profile_name.receive_buffer_size` is replaced by `transport.profiles.profile_name.tcp.receive_buffer_size`

// end::notable-breaking-changes[]
@ -9,16 +9,21 @@ up: nodes that have already joined a cluster store this information in their

data folder and freshly-started nodes that are joining an existing cluster
obtain this information from the cluster's elected master.

The initial set of master-eligible nodes is defined in the
<<initial_master_nodes,`cluster.initial_master_nodes` setting>>. When you
start a master-eligible node, you can provide this setting on the command line
or in the `elasticsearch.yml` file. After the cluster has formed, this setting
is no longer required and is ignored. It need not be set
on master-ineligible nodes, nor on master-eligible nodes that are started to
join an existing cluster. Note that master-eligible nodes should use storage
that persists across restarts. If they do not, and
`cluster.initial_master_nodes` is set, and a full cluster restart occurs, then
another brand-new cluster will form and this may result in data loss.
The initial set of master-eligible nodes is defined in the
<<initial_master_nodes,`cluster.initial_master_nodes` setting>>. This is a list
of the <<node.name,node names>> or IP addresses of the master-eligible nodes in
the new cluster. If you do not configure `node.name` then it is set to the
node's hostname, so in this case you can use hostnames in
`cluster.initial_master_nodes` too.

When you start a master-eligible node, you can provide this setting on the
command line or in the `elasticsearch.yml` file. After the cluster has formed,
this setting is no longer required and is ignored. It need not be set on
master-ineligible nodes, nor on master-eligible nodes that are started to join
an existing cluster. Note that master-eligible nodes should use storage that
persists across restarts. If they do not, and `cluster.initial_master_nodes` is
set, and a full cluster restart occurs, then another brand-new cluster will
form and this may result in data loss.

It is technically sufficient to set `cluster.initial_master_nodes` on a single
master-eligible node in the cluster, and only to mention that single node in the

@ -42,10 +47,9 @@ cluster.initial_master_nodes:

- master-c
--------------------------------------------------

Alternatively the IP addresses or hostnames (<<node.name,if node name defaults
to the host name>>) can be used. If there is more than one Elasticsearch node
with the same IP address or hostname then the transport ports must also be given
to specify exactly which node is meant:
You can use a mix of IP addresses and node names too. If there is more than one
Elasticsearch node with the same IP address then the transport port must also
be given to specify exactly which node is meant:

[source,yaml]
--------------------------------------------------

@ -64,6 +68,37 @@ nodes on the command-line that is used to start Elasticsearch:

$ bin/elasticsearch -Ecluster.initial_master_nodes=master-a,master-b,master-c
--------------------------------------------------

[NOTE]
==================================================

[[modules-discovery-bootstrap-cluster-fqdns]] The node names used in the
`cluster.initial_master_nodes` list must exactly match the `node.name`
properties of the nodes. By default the node name is set to the machine's
hostname which may or may not be fully-qualified depending on your system
configuration. If each node name is a fully-qualified domain name such as
`master-a.example.com` then you must use fully-qualified domain names in the
`cluster.initial_master_nodes` list too; conversely if your node names are bare
hostnames (without the `.example.com` suffix) then you must use bare hostnames
in the `cluster.initial_master_nodes` list. If you use a mix of fully-qualified
and bare hostnames, or there is some other mismatch between `node.name` and
`cluster.initial_master_nodes`, then the cluster will not form successfully and
you will see log messages like the following.

[source,text]
--------------------------------------------------
[master-a.example.com] master not discovered yet, this node has
not previously joined a bootstrapped (v7+) cluster, and this
node must discover master-eligible nodes [master-a, master-b] to
bootstrap a cluster: have discovered [{master-b.example.com}{...
--------------------------------------------------

This message shows the node names `master-a.example.com` and
`master-b.example.com` as well as the `cluster.initial_master_nodes` entries
`master-a` and `master-b`, and it is clear from this message that they do not
match exactly.

==================================================

[float]
==== Choosing a cluster name

@ -25,6 +25,23 @@ Discovery and cluster formation are affected by the following settings:

compatibility. Support for the old name will be removed in a future
version.

`cluster.initial_master_nodes`::

Sets a list of the <<node.name,node names>> or transport addresses of the
initial set of master-eligible nodes in a brand-new cluster. By default
this list is empty, meaning that this node expects to join a cluster that
has already been bootstrapped. See <<initial_master_nodes>>.

[float]
==== Expert settings

Discovery and cluster formation are also affected by the following
_expert-level_ settings, although it is not recommended to change any of these
from their default values.

[WARNING] If you adjust these settings then your cluster may not form correctly
or may become unstable or intolerant of certain failures.

`discovery.cluster_formation_warning_timeout`::

Sets how long a node will try to form a cluster before logging a warning

@ -49,6 +66,7 @@ Discovery and cluster formation are affected by the following settings:

handshake. Defaults to `1s`.

`discovery.request_peers_timeout`::

Sets how long a node will wait after asking its peers again before
considering the request to have failed. Defaults to `3s`.
@ -83,73 +101,78 @@ Discovery and cluster formation are affected by the following settings:

Sets the amount to increase the upper bound on the wait before an election
on each election failure. Note that this is _linear_ backoff. This defaults
to `100ms`
to `100ms`. Changing this setting from the default may cause your cluster
to fail to elect a master node.

`cluster.election.duration`::

Sets how long each election is allowed to take before a node considers it to
have failed and schedules a retry. This defaults to `500ms`.
Sets how long each election is allowed to take before a node considers it
to have failed and schedules a retry. This defaults to `500ms`. Changing
this setting from the default may cause your cluster to fail to elect a
master node.

`cluster.election.initial_timeout`::

Sets the upper bound on how long a node will wait initially, or after the
elected master fails, before attempting its first election. This defaults
to `100ms`.
to `100ms`. Changing this setting from the default may cause your cluster
to fail to elect a master node.

`cluster.election.max_timeout`::

Sets the maximum upper bound on how long a node will wait before attempting
a first election, so that a network partition that lasts for a long time
does not result in excessively sparse elections. This defaults to `10s`
does not result in excessively sparse elections. This defaults to `10s`.
Changing this setting from the default may cause your cluster to fail to
elect a master node.

[[fault-detection-settings]]`cluster.fault_detection.follower_check.interval`::

Sets how long the elected master waits between follower checks to each
other node in the cluster. Defaults to `1s`.
other node in the cluster. Defaults to `1s`. Changing this setting from the
default may cause your cluster to become unstable.

`cluster.fault_detection.follower_check.timeout`::

Sets how long the elected master waits for a response to a follower check
before considering it to have failed. Defaults to `10s`.
before considering it to have failed. Defaults to `10s`. Changing this
setting from the default may cause your cluster to become unstable.

`cluster.fault_detection.follower_check.retry_count`::

Sets how many consecutive follower check failures must occur to each node
before the elected master considers that node to be faulty and removes it
from the cluster. Defaults to `3`.
from the cluster. Defaults to `3`. Changing this setting from the default
may cause your cluster to become unstable.

`cluster.fault_detection.leader_check.interval`::

Sets how long each node waits between checks of the elected master.
Defaults to `1s`.
Defaults to `1s`. Changing this setting from the default may cause your
cluster to become unstable.

`cluster.fault_detection.leader_check.timeout`::

Sets how long each node waits for a response to a leader check from the
elected master before considering it to have failed. Defaults to `10s`.
Changing this setting from the default may cause your cluster to become
unstable.

`cluster.fault_detection.leader_check.retry_count`::

Sets how many consecutive leader check failures must occur before a node
considers the elected master to be faulty and attempts to find or elect a
new master. Defaults to `3`.
new master. Defaults to `3`. Changing this setting from the default may
cause your cluster to become unstable.

`cluster.follower_lag.timeout`::

Sets how long the master node waits to receive acknowledgements for cluster
state updates from lagging nodes. The default value is `90s`. If a node does
not successfully apply the cluster state update within this period of time,
it is considered to have failed and is removed from the cluster. See
state updates from lagging nodes. The default value is `90s`. If a node
does not successfully apply the cluster state update within this period of
time, it is considered to have failed and is removed from the cluster. See
<<cluster-state-publishing>>.

`cluster.initial_master_nodes`::

Sets a list of the <<node.name,node names>> or transport addresses of the
initial set of master-eligible nodes in a brand-new cluster. By default
this list is empty, meaning that this node expects to join a cluster that
has already been bootstrapped. See <<initial_master_nodes>>.

`cluster.join.timeout`::

Sets how long a node will wait after sending a request to join a cluster

@ -165,8 +188,7 @@ Discovery and cluster formation are affected by the following settings:

`cluster.publish.timeout`::

Sets how long the master node waits for each cluster state update to be
completely published to all nodes. The default value is `30s`. If this
period of time elapses, the cluster state change is rejected. See
completely published to all nodes. The default value is `30s`. See
<<cluster-state-publishing>>.

[[no-master-block]]`cluster.no_master_block`::
@ -1,8 +1,5 @@

[[release-highlights]]
= {es} Release highlights
++++
<titleabbrev>Release highlights</titleabbrev>
++++
= Release highlights

[partintro]
--

@ -82,7 +82,9 @@ This will yield the following result:

"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
"shallow_advance_count": 0,
"set_min_competitive_score": 0,
"set_min_competitive_score_count": 0
},
"children": [
{

@ -105,7 +107,9 @@ This will yield the following result:

"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
"shallow_advance_count": 0,
"set_min_competitive_score": 0,
"set_min_competitive_score_count": 0
}
},
{

@ -128,7 +132,9 @@ This will yield the following result:

"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
"shallow_advance_count": 0,
"set_min_competitive_score": 0,
"set_min_competitive_score_count": 0
}
}
]

@ -311,7 +317,9 @@ The `breakdown` component lists detailed timing statistics about low-level Lucen

"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
"shallow_advance_count": 0,
"set_min_competitive_score": 0,
"set_min_competitive_score_count": 0
}
--------------------------------------------------
// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:some message:number",\n"time_in_nanos": $body.$_path,/]

@ -575,7 +583,9 @@ And the response:

"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
"shallow_advance_count": 0,
"set_min_competitive_score": 0,
"set_min_competitive_score_count": 0
}
},
{

@ -598,7 +608,9 @@ And the response:

"compute_max_score": 0,
"compute_max_score_count": 0,
"shallow_advance": 0,
"shallow_advance_count": 0
"shallow_advance_count": 0,
"set_min_competitive_score": 0,
"set_min_competitive_score_count": 0
}
}
],

@ -44,7 +44,7 @@ GET /my_index/_rank_eval

"mean_reciprocal_rank": { ... } <3>
}
}
------------------------------
-----------------------------
// NOTCONSOLE

<1> a set of typical search requests, together with their provided ratings

@ -77,7 +77,7 @@ The request section contains several search requests typical to your application

]
}
]
------------------------------
-----------------------------
// NOTCONSOLE

<1> the search requests id, used to group result details later

@ -1074,8 +1074,7 @@ they cannot have individual passwords.

If you are loading the IdP metadata over SSL/TLS (that is, `idp.metadata.path`
is a URL using the `https` protocol), the following settings can be used to
configure SSL. If these are not specified, then the
<<ssl-tls-settings,default SSL settings>> are used.
configure SSL.

NOTE: These settings are not used for any purpose other than loading metadata
over https.
@ -1204,6 +1203,247 @@ If this setting is used, then the Kerberos realm does not perform role mapping a

instead loads the user from the listed realms.
See {stack-ov}/realm-chains.html#authorization_realms[Delegating authorization to another realm]

[[ref-oidc-settings]]
[float]
===== OpenID Connect realm settings

In addition to the <<ref-realm-settings,settings that are valid for all realms>>, you
can specify the following settings:

`op.issuer`::
A verifiable Identifier for your OpenID Connect Provider. An Issuer
Identifier is usually a case sensitive URL using the https scheme that contains
scheme, host, and optionally, port number and path components and no query or
fragment components. The value for this setting should be provided by your OpenID
Connect Provider.

`op.authorization_endpoint`::
The URL for the Authorization Endpoint at the
OpenID Connect Provider. The value for this setting should be provided by your OpenID
Connect Provider.

`op.token_endpoint`::
The URL for the Token Endpoint at the OpenID Connect Provider.
The value for this setting should be provided by your OpenID Connect Provider.

`op.userinfo_endpoint`::
The URL for the User Info Endpoint at the OpenID Connect Provider.
The value for this setting should be provided by your OpenID Connect Provider.

`op.endsession_endpoint`::
The URL for the End Session Endpoint at the OpenID Connect
Provider. The value for this setting should be provided by your OpenID Connect Provider.

`op.jwkset_path`::
The path or URL to a JSON Web Key Set with the key material that the OpenID Connect
Provider uses for signing tokens and claims responses.
If a path is provided, then it is resolved relative to the {es} config
directory.
If a URL is provided, then it must be either a `file` URL or a `https` URL.
{es} automatically caches the retrieved JWK set to avoid unnecessary HTTP
requests but will attempt to refresh the JWK upon signature verification
failure, as this might indicate that the OpenID Connect Provider has
rotated the signing keys.

File based resources are polled at a frequency determined by the global {es}
`resource.reload.interval.high` setting, which defaults to 5 seconds.

`rp.client_id`::
The OAuth 2.0 Client Identifier that was assigned to {es} during registration
at the OpenID Connect Provider

`rp.client_secret`(<<secure-settings,Secure>>)::
The OAuth 2.0 Client Secret that was assigned to {es} during registration
at the OpenID Connect Provider

`rp.redirect_uri`::
The Redirect URI within {kib}. Typically this is the
"api/security/v1/oidc" endpoint of your Kibana server. For example,
`https://kibana.example.com/api/security/v1/oidc`.

`rp.response_type`::
OAuth 2.0 Response Type value that determines the authorization
processing flow to be used. Can be `code` for authorization code grant flow,
or one of `id_token`, `id_token token` for the implicit flow.

`rp.signature_algorithm`::
The signature algorithm that will be used by {es} in order to verify the
signature of the id tokens it will receive from the OpenID Connect Provider.
Defaults to `RSA256`

`rp.requested_scopes`::
The scope values that will be requested by the OpenID Connect Provider as
part of the Authentication Request. Optional, defaults to `openid`

`rp.post_logout_redirect_uri`::
The Redirect URI (usually within {kib}) that the OpenID Connect Provider
should redirect the browser to after a successful Single Logout.

`claims.principal`::
The name of the OpenID Connect claim that contains the user's principal (username).

`claims.groups`::
The name of the OpenID Connect claim that contains the user's groups.

`claims.name`::
The name of the OpenID Connect claim that contains the user's full name.

`claims.mail`::
The name of the OpenID Connect claim that contains the user's email address.

`claims.dn`::
The name of the OpenID Connect claim that contains the user's X.509
_Distinguished Name_.

`claim_patterns.principal`::
A Java regular expression that is matched against the OpenID Connect claim specified
by `claims.principal` before it is applied to the user's _principal_ property.
The attribute value must match the pattern and the value of the first
_capturing group_ is used as the principal. For example, `^([^@]+)@example\\.com$`
matches email addresses from the "example.com" domain and uses the local-part as
the principal.

`claim_patterns.groups`::
As per `claim_patterns.principal`, but for the _group_ property.

`claim_patterns.name`::
As per `claim_patterns.principal`, but for the _name_ property.

`claim_patterns.mail`::
As per `claim_patterns.principal`, but for the _mail_ property.

`claim_patterns.dn`::
As per `claim_patterns.principal`, but for the _dn_ property.

`allowed_clock_skew`::
The maximum allowed clock skew to be taken into consideration when validating
id tokens with regards to their creation and expiration times.

`populate_user_metadata`::
Specifies whether to populate the {es} user's metadata with the values that are
provided by the OpenID Connect claims. Defaults to `true`.

`http.connect_timeout`::
Controls the behavior of the http client used for back-channel communication to
the OpenID Connect Provider endpoints. Specifies the timeout until a connection
is established. A value of zero means the timeout is not used. Defaults to `5s`

`http.connection_read_timeout`::
Controls the behavior of the http client used for back-channel communication to
the OpenID Connect Provider endpoints. Specifies the timeout used when
requesting a connection from the connection manager. Defaults to `5s`

`http.socket_timeout`::
Controls the behavior of the http client used for back-channel communication to
the OpenID Connect Provider endpoints. Specifies the socket timeout (SO_TIMEOUT)
in milliseconds, which is the timeout for waiting for data or, put differently,
the maximum period of inactivity between two consecutive data packets. Defaults to
`5s`

`http.max_connections`::
Controls the behavior of the http client used for back-channel communication to
the OpenID Connect Provider endpoints. Specifies the maximum number of
connections allowed across all endpoints.

`http.max_endpoint_connections`::
Controls the behavior of the http client used for back-channel communication to
the OpenID Connect Provider endpoints. Specifies the maximum number of
connections allowed per endpoint.

[float]
[[ref-oidc-ssl-settings]]
===== OpenID Connect realm SSL settings

The following settings can be used to configure SSL for all outgoing http connections
to the OpenID Connect Provider endpoints.

NOTE: These settings are _only_ used for the back-channel communication between
{es} and the OpenID Connect Provider

`ssl.key`::
Specifies the path to the PEM encoded private key to use for http client
authentication (if required). `ssl.key` and `ssl.keystore.path` cannot be used
at the same time.

`ssl.key_passphrase`::
Specifies the passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is
encrypted. Cannot be used with `ssl.secure_key_passphrase`.

`ssl.secure_key_passphrase` (<<secure-settings,Secure>>)::
Specifies the passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is
encrypted. Cannot be used with `ssl.key_passphrase`.

`ssl.certificate`::
Specifies the path to the PEM encoded certificate (or certificate chain) that is associated
with the key (`ssl.key`). This setting can be used only if `ssl.key` is set.

`ssl.certificate_authorities`::
Specifies the paths to the PEM encoded certificate authority certificates that should be
trusted. `ssl.certificate_authorities` and `ssl.truststore.path` cannot be
used at the same time.

`ssl.keystore.path`::
Specifies the path to the keystore that contains a private key and certificate.
Must be either a Java Keystore (jks) or a PKCS#12 file.
`ssl.key` and `ssl.keystore.path` cannot be used at the same time.

`ssl.keystore.type`::
The type of the keystore (`ssl.keystore.path`). Must be either `jks` or `PKCS12`.
If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults
to `PKCS12`. Otherwise, it defaults to `jks`.

`ssl.keystore.password`::
The password to the keystore (`ssl.keystore.path`). This setting cannot be used
with `ssl.keystore.secure_password`.

`ssl.keystore.secure_password` (<<secure-settings,Secure>>)::
The password to the keystore (`ssl.keystore.path`).
This setting cannot be used with `ssl.keystore.password`.

`ssl.keystore.key_password`::
The password for the key in the keystore (`ssl.keystore.path`).
Defaults to the keystore password. This setting cannot be used with
`ssl.keystore.secure_key_password`.

`ssl.keystore.secure_key_password` (<<secure-settings,Secure>>)::
The password for the key in the keystore (`ssl.keystore.path`).
Defaults to the keystore password. This setting cannot be used with
`ssl.keystore.key_password`.

`ssl.truststore.path`::
The path to the keystore that contains the certificates to trust.
Must be either a Java Keystore (jks) or a PKCS#12 file.
`ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the
same time.

`ssl.truststore.type`::
The type of the truststore (`ssl.truststore.path`). Must be either `jks` or
`PKCS12`. If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting
defaults to `PKCS12`. Otherwise, it defaults to `jks`.

`ssl.truststore.password`::
The password to the truststore (`ssl.truststore.path`). This setting cannot be
used with `ssl.truststore.secure_password`.

`ssl.truststore.secure_password` (<<secure-settings,Secure>>)::
The password to the truststore (`ssl.truststore.path`). This setting cannot be
used with `ssl.truststore.password`.

`ssl.verification_mode`::
One of `full` (verify the hostname and the certificate path), `certificate` (verify the
certificate path, but not the hostname) or `none` (perform no verification).
Defaults to `full`.
+
See <<ssl-tls-settings,`ssl.verification_mode`>> for a more detailed explanation of these values.

`ssl.supported_protocols`::
Specifies the supported protocols for TLS/SSL. Defaults to `TLSv1.3,TLSv1.2,TLSv1.1` if
the JVM supports TLSv1.3, otherwise `TLSv1.2,TLSv1.1`.

`ssl.cipher_suites`::
Specifies the cipher suites that should be supported.

[float]
[[load-balancing]]
===== Load balancing and failover

@ -25,16 +25,15 @@ platforms, but it is possible that it will work on other platforms too.

Elasticsearch is built using Java, and includes a bundled version of
http://openjdk.java.net[OpenJDK] from the JDK maintainers (GPLv2+CE)
within each distribution. The bundled JVM exists within the `jdk` directory of
the Elasticsearch home directory.
within each distribution. The bundled JVM is the recommended JVM and
is located within the `jdk` directory of the Elasticsearch home directory.

To use your own version of Java, set the `JAVA_HOME` environment variable.
When using your own version, the bundled JVM directory may be removed.
If not using the bundled JVM, we recommend installing Java version
*{jdk} or a later version in the Java {jdk_major} release series*. We recommend
using a link:/support/matrix[supported]
If you must use a version of Java that is different from the bundled JVM,
we recommend using a link:/support/matrix[supported]
http://www.oracle.com/technetwork/java/eol-135779.html[LTS version of Java].
Elasticsearch will refuse to start if a known-bad version of Java is used.
The bundled JVM directory may be removed when using your own JVM.

--

@ -58,9 +58,17 @@ cluster.initial_master_nodes:

`transport.port` if not specified.
<2> If a hostname resolves to multiple IP addresses then the node will attempt to
discover other nodes at all resolved addresses.
<3> Initial master nodes can be identified by their <<node.name,node name>>.
<3> Initial master nodes can be identified by their <<node.name,`node.name`>>,
which defaults to the hostname. Make sure that the value in
`cluster.initial_master_nodes` matches the `node.name` exactly. If you use
a fully-qualified domain name such as `master-node-a.example.com` for your
node names then you must use the fully-qualified name in this list;
conversely if `node.name` is a bare hostname without any trailing
qualifiers then you must also omit the trailing qualifiers in
`cluster.initial_master_nodes`.
<4> Initial master nodes can also be identified by their IP address.
<5> If multiple master nodes share an IP address then the port must be used to
disambiguate them.
<5> If multiple master nodes share an IP address then the transport port must
be used to distinguish between them.

For more information, see <<modules-discovery-settings>>.
For more information, see <<modules-discovery-bootstrap-cluster>> and
<<modules-discovery-settings>>.
@ -82,7 +82,7 @@ The `vm.max_map_count` setting should be set permanently in `/etc/sysctl.conf`:

--------------------------------------------
$ grep vm.max_map_count /etc/sysctl.conf
vm.max_map_count=262144
----------------------------------
--------------------------------------------

To apply the setting on a live system type: `sysctl -w vm.max_map_count=262144`
--

@ -169,6 +169,7 @@ services:

container_name: es01
environment:
- node.name=es01
- discovery.seed_hosts=es02
- cluster.initial_master_nodes=es01,es02
- cluster.name=docker-cluster
- bootstrap.memory_lock=true

@ -51,14 +51,21 @@ Once registered, the driver understands the following syntax as an URL:

["source","text",subs="attributes"]
----
jdbc:es://<1>[[http|https]://]*<2>[host[:port]]*<3>/[prefix]*<4>[?[option=value]&<5>]*
jdbc:es://[[http|https]://]*[host[:port]]*/[prefix]*<[?[option=value]&]*
----
`jdbc:es://`:: Prefix. Mandatory.

<1> `jdbc:es://` prefix. Mandatory.
<2> type of HTTP connection to make - `http` (default) or `https`. Optional.
<3> host (`localhost` by default) and port (`9200` by default). Optional.
<4> prefix (empty by default). Typically used when hosting {es} under a certain path. Optional.
<5> Properties for the JDBC driver. Empty by default. Optional.
`[[http|https]://]`:: Type of HTTP connection to make. Possible values are
`http` (default) or `https`. Optional.

`[host[:port]]`:: Host (`localhost` by default) and port (`9200` by default).
Optional.

`[prefix]`:: Prefix (empty by default). Typically used when hosting {es} under
a certain path. Optional.

`[option=value]`:: Properties for the JDBC driver. Empty by default.
Optional.

The driver recognizes the following properties:
@ -16,7 +16,7 @@ Functions for computing a _single_ result from a set of input values.

.Synopsis:
[source, sql]
--------------------------------------------------
AVG(numeric_field<1>)
AVG(numeric_field) <1>
--------------------------------------------------

*Input*:
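For illustration only, a minimal query in the shape of the synopsis above could look like the following sketch. It assumes a hypothetical `emp` index with a `languages` field and a numeric `salary` field; those names are not part of this change.

[source, sql]
--------------------------------------------------
-- average salary per number of spoken languages (hypothetical data)
SELECT languages, AVG(salary) AS avg_salary
FROM emp
GROUP BY languages;
--------------------------------------------------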
@ -40,7 +40,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggAvg]

.Synopsis:
[source, sql]
--------------------------------------------------
COUNT(expression<1>)
COUNT(expression) <1>
--------------------------------------------------

*Input*:

@ -70,7 +70,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggCountStar]

.Synopsis:
[source, sql]
--------------------------------------------------
COUNT(ALL field_name<1>)
COUNT(ALL field_name) <1>
--------------------------------------------------

*Input*:

@ -95,7 +95,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggCountAll]

.Synopsis:
[source, sql]
--------------------------------------------------
COUNT(DISTINCT field_name<1>)
COUNT(DISTINCT field_name) <1>
--------------------------------------------------

*Input*:
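As a sketch of the three `COUNT` variants shown in the synopses above (again assuming a hypothetical `emp` index with a `gender` field):

[source, sql]
--------------------------------------------------
SELECT COUNT(*) AS total,
       COUNT(ALL gender) AS with_gender,
       COUNT(DISTINCT gender) AS distinct_genders
FROM emp;
--------------------------------------------------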
@ -119,7 +119,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggCountDistinct]

.Synopsis:
[source, sql]
----------------------------------------------
FIRST(field_name<1>[, ordering_field_name]<2>)
FIRST(
    field_name <1>
    [, ordering_field_name]) <2>
----------------------------------------------

*Input*:

@ -214,7 +216,9 @@ the field is also <<before-enabling-fielddata,saved as a keyword>>.

.Synopsis:
[source, sql]
--------------------------------------------------
LAST(field_name<1>[, ordering_field_name]<2>)
LAST(
    field_name <1>
    [, ordering_field_name]) <2>
--------------------------------------------------

*Input*:
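A sketch combining `FIRST` and `LAST` with an explicit ordering field, assuming a hypothetical `emp` index with `gender`, `first_name` and `birth_date` fields:

[source, sql]
--------------------------------------------------
SELECT gender,
       FIRST(first_name, birth_date) AS first_by_birth,
       LAST(first_name, birth_date) AS last_by_birth
FROM emp
GROUP BY gender;
--------------------------------------------------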
@ -309,7 +313,7 @@ the field is also <<before-enabling-fielddata,`saved as a keyword`>>.

.Synopsis:
[source, sql]
--------------------------------------------------
MAX(field_name<1>)
MAX(field_name) <1>
--------------------------------------------------

*Input*:

@ -337,7 +341,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggMax]

.Synopsis:
[source, sql]
--------------------------------------------------
MIN(field_name<1>)
MIN(field_name) <1>
--------------------------------------------------

*Input*:

@ -365,7 +369,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggMin]

.Synopsis:
[source, sql]
--------------------------------------------------
SUM(field_name<1>)
SUM(field_name) <1>
--------------------------------------------------

*Input*:

@ -393,7 +397,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggSum]

.Synopsis:
[source, sql]
--------------------------------------------------
KURTOSIS(field_name<1>)
KURTOSIS(field_name) <1>
--------------------------------------------------

*Input*:

@ -417,7 +421,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggKurtosis]

.Synopsis:
[source, sql]
--------------------------------------------------
MAD(field_name<1>)
MAD(field_name) <1>
--------------------------------------------------

*Input*:

@ -441,7 +445,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggMad]

.Synopsis:
[source, sql]
--------------------------------------------------
PERCENTILE(field_name<1>, numeric_exp<2>)
PERCENTILE(
    field_name, <1>
    numeric_exp) <2>
--------------------------------------------------

*Input*:

@ -467,7 +473,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggPercentile]

.Synopsis:
[source, sql]
--------------------------------------------------
PERCENTILE_RANK(field_name<1>, numeric_exp<2>)
PERCENTILE_RANK(
    field_name, <1>
    numeric_exp) <2>
--------------------------------------------------

*Input*:
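For example, a minimal sketch using both percentile functions above (hypothetical `emp` index with `languages` and `salary` fields; the threshold value is arbitrary):

[source, sql]
--------------------------------------------------
SELECT languages,
       PERCENTILE(salary, 95) AS salary_p95,
       PERCENTILE_RANK(salary, 65000) AS rank_at_65k
FROM emp
GROUP BY languages;
--------------------------------------------------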
@ -493,7 +501,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggPercentileRank]

.Synopsis:
[source, sql]
--------------------------------------------------
SKEWNESS(field_name<1>)
SKEWNESS(field_name) <1>
--------------------------------------------------

*Input*:

@ -517,7 +525,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggSkewness]

.Synopsis:
[source, sql]
--------------------------------------------------
STDDEV_POP(field_name<1>)
STDDEV_POP(field_name) <1>
--------------------------------------------------

*Input*:

@ -541,7 +549,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggStddevPop]

.Synopsis:
[source, sql]
--------------------------------------------------
SUM_OF_SQUARES(field_name<1>)
SUM_OF_SQUARES(field_name) <1>
--------------------------------------------------

*Input*:

@ -565,7 +573,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggSumOfSquares]

.Synopsis:
[source, sql]
--------------------------------------------------
VAR_POP(field_name<1>)
VAR_POP(field_name) <1>
--------------------------------------------------

*Input*:
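The statistical aggregates above can be combined in a single query. A sketch, again over a hypothetical `emp` index with a numeric `salary` field:

[source, sql]
--------------------------------------------------
SELECT STDDEV_POP(salary)     AS std_dev,
       VAR_POP(salary)        AS variance,
       SUM_OF_SQUARES(salary) AS sum_sq,
       SKEWNESS(salary)       AS skew,
       KURTOSIS(salary)       AS kurt
FROM emp;
--------------------------------------------------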
@ -11,7 +11,10 @@ Functions that return one of their arguments by evaluating in an if-else manner.

.Synopsis:
[source, sql]
----
COALESCE(expression<1>, expression<2>, ...)
COALESCE(
    expression, <1>
    expression, <2>
    ...)
----

*Input*:

@ -51,7 +54,10 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[coalesceReturnNull]

.Synopsis:
[source, sql]
----
GREATEST(expression<1>, expression<2>, ...)
GREATEST(
    expression, <1>
    expression, <2>
    ...)
----

*Input*:
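A minimal sketch of the two functions above, using constant expressions only (no index required; literals are arbitrary):

[source, sql]
--------------------------------------------------
SELECT COALESCE(null, 'first non-null', 'ignored') AS coalesced,
       GREATEST(null, 10, 20) AS largest;
--------------------------------------------------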
@ -92,7 +98,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNull]

.Synopsis:
[source, sql]
----
IFNULL(expression<1>, expression<2>)
IFNULL(
    expression, <1>
    expression) <2>
----

*Input*:

@ -129,7 +137,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnSecond]

.Synopsis:
[source, sql]
----
ISNULL(expression<1>, expression<2>)
ISNULL(
    expression, <1>
    expression) <2>
----

*Input*:

@ -166,7 +176,10 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnSecond]

.Synopsis:
[source, sql]
----
LEAST(expression<1>, expression<2>, ...)
LEAST(
    expression, <1>
    expression, <2>
    ...)
----

*Input*:

@ -208,7 +221,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNull]

.Synopsis:
[source, sql]
----
NULLIF(expression<1>, expression<2>)
NULLIF(
    expression, <1>
    expression) <2>
----

*Input*:

@ -243,7 +258,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnNull]

.Synopsis:
[source, sql]
----
NVL(expression<1>, expression<2>)
NVL(
    expression, <1>
    expression) <2>
----

*Input*:
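A sketch contrasting the null-handling functions above, again with arbitrary constant expressions:

[source, sql]
--------------------------------------------------
SELECT IFNULL(null, 'fallback') AS ifnull_result,
       NVL(null, 'fallback')    AS nvl_result,
       NULLIF('elastic', 'elastic') AS nullif_result;
--------------------------------------------------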
@ -146,7 +146,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[filterToday]

[source, sql]
--------------------------------------------------
CURRENT_TIME
CURRENT_TIME([precision <1>])
CURRENT_TIME([precision]) <1>
CURTIME
--------------------------------------------------

@ -203,7 +203,7 @@ function as the maximum number of second fractional digits returned is 3 (millis

[source, sql]
--------------------------------------------------
CURRENT_TIMESTAMP
CURRENT_TIMESTAMP([precision <1>])
CURRENT_TIMESTAMP([precision]) <1>
--------------------------------------------------

*Input*:
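A minimal sketch of the current date/time functions above, with an explicit precision argument (the precision value is arbitrary):

[source, sql]
--------------------------------------------------
SELECT CURRENT_TIMESTAMP    AS now,
       CURRENT_TIMESTAMP(2) AS now_2_digits,
       CURRENT_TIME(3)      AS time_now;
--------------------------------------------------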
@ -254,7 +254,7 @@ function as the maximum number of second fractional digits returned is 3 (millis

.Synopsis:
[source, sql]
--------------------------------------------------
DAY_OF_MONTH(datetime_exp<1>)
DAY_OF_MONTH(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -278,7 +278,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfMonth]

.Synopsis:
[source, sql]
--------------------------------------------------
DAY_OF_WEEK(datetime_exp<1>)
DAY_OF_WEEK(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -302,7 +302,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfWeek]

.Synopsis:
[source, sql]
--------------------------------------------------
DAY_OF_YEAR(datetime_exp<1>)
DAY_OF_YEAR(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -326,7 +326,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[dayOfYear]

.Synopsis:
[source, sql]
--------------------------------------------------
DAY_NAME(datetime_exp<1>)
DAY_NAME(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -350,7 +350,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[dayName]

.Synopsis:
[source, sql]
--------------------------------------------------
HOUR_OF_DAY(datetime_exp<1>)
HOUR_OF_DAY(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -374,7 +374,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[hourOfDay]

.Synopsis:
[source, sql]
--------------------------------------------------
ISO_DAY_OF_WEEK(datetime_exp<1>)
ISO_DAY_OF_WEEK(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -399,7 +399,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[isoDayOfWeek]

.Synopsis:
[source, sql]
--------------------------------------------------
ISO_WEEK_OF_YEAR(datetime_exp<1>)
ISO_WEEK_OF_YEAR(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -424,7 +424,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[isoWeekOfYear]

.Synopsis:
[source, sql]
--------------------------------------------------
MINUTE_OF_DAY(datetime_exp<1>)
MINUTE_OF_DAY(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -448,7 +448,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[minuteOfDay]

.Synopsis:
[source, sql]
--------------------------------------------------
MINUTE_OF_HOUR(datetime_exp<1>)
MINUTE_OF_HOUR(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -472,7 +472,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[minuteOfHour]

.Synopsis:
[source, sql]
--------------------------------------------------
MONTH(datetime_exp<1>)
MONTH(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -496,7 +496,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[monthOfYear]

.Synopsis:
[source, sql]
--------------------------------------------------
MONTH_NAME(datetime_exp<1>)
MONTH_NAME(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -552,7 +552,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[filterNow]

.Synopsis:
[source, sql]
--------------------------------------------------
SECOND_OF_MINUTE(datetime_exp<1>)
SECOND_OF_MINUTE(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -576,7 +576,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[secondOfMinute]

.Synopsis:
[source, sql]
--------------------------------------------------
QUARTER(datetime_exp<1>)
QUARTER(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -632,7 +632,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[filterToday]

.Synopsis:
[source, sql]
--------------------------------------------------
WEEK_OF_YEAR(datetime_exp<1>)
WEEK_OF_YEAR(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -656,7 +656,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[weekOfYear]

.Synopsis:
[source, sql]
--------------------------------------------------
YEAR(datetime_exp<1>)
YEAR(datetime_exp) <1>
--------------------------------------------------

*Input*:

@ -680,7 +680,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[year]

.Synopsis:
[source, sql]
--------------------------------------------------
EXTRACT(datetime_function<1> FROM datetime_exp<2>)
EXTRACT(
    datetime_function <1>
    FROM datetime_exp) <2>
--------------------------------------------------

*Input*:
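As a sketch of the date-part functions and `EXTRACT` shown above (hypothetical `emp` index with `first_name` and `hire_date` fields):

[source, sql]
--------------------------------------------------
SELECT first_name,
       EXTRACT(YEAR FROM hire_date) AS hire_year,
       MONTH(hire_date)             AS hire_month
FROM emp
LIMIT 5;
--------------------------------------------------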
@ -12,8 +12,13 @@ as part of the <<sql-syntax-group-by, grouping>>.

.Synopsis:
[source, sql]
----
HISTOGRAM(numeric_exp<1>, numeric_interval<2>)
HISTOGRAM(date_exp<3>, date_time_interval<4>)
HISTOGRAM(
    numeric_exp, <1>
    numeric_interval) <2>

HISTOGRAM(
    date_exp, <3>
    date_time_interval) <4>
----

*Input*:
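For example, a minimal numeric histogram in the shape of the first synopsis above, used as a grouping key (hypothetical `emp` index with a numeric `salary` field; the interval is arbitrary):

[source, sql]
--------------------------------------------------
SELECT HISTOGRAM(salary, 5000) AS salary_bucket, COUNT(*) AS cnt
FROM emp
GROUP BY salary_bucket;
--------------------------------------------------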
@ -20,7 +20,8 @@ or has an exact sub-field, it will use it as is, or it will automatically use th

.Synopsis:
[source, sql]
--------------------------------------------------
expression<1> LIKE constant_exp<2>
expression <1>
LIKE constant_exp <2>
--------------------------------------------------

<1> typically a field, or a constant expression

@ -59,7 +60,8 @@ IMPORTANT: Even though `LIKE` is a valid option when searching or filtering in {

.Synopsis:
[source, sql]
--------------------------------------------------
expression<1> RLIKE constant_exp<2>
expression <1>
RLIKE constant_exp <2>
--------------------------------------------------

<1> typically a field, or a constant expression
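A short sketch of both operators above in a `WHERE` clause (hypothetical `emp` index with a `first_name` field; the patterns are arbitrary):

[source, sql]
--------------------------------------------------
SELECT first_name
FROM emp
WHERE first_name LIKE 'A%'      -- SQL wildcard pattern
   OR first_name RLIKE 'Ma.*';  -- regular expression pattern
--------------------------------------------------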
@ -16,7 +16,7 @@ to be numeric.

.Synopsis:
[source, sql]
--------------------------------------------------
ABS(numeric_exp<1>)
ABS(numeric_exp) <1>
--------------------------------------------------

*Input*:

@ -40,7 +40,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[abs]

.Synopsis:
[source, sql]
--------------------------------------------------
CBRT(numeric_exp<1>)
CBRT(numeric_exp) <1>
--------------------------------------------------

*Input*:

@ -64,7 +64,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCbrtWithNegativeValue]

.Synopsis:
[source, sql]
--------------------------------------------------
CEIL(numeric_exp<1>)
CEIL(numeric_exp) <1>
--------------------------------------------------

*Input*:

@ -110,7 +110,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathEulersNumber]

.Synopsis:
[source, sql]
--------------------------------------------------
EXP(numeric_exp<1>)
EXP(numeric_exp) <1>
--------------------------------------------------

*Input*:

@ -134,7 +134,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathExpInline]

.Synopsis:
[source, sql]
--------------------------------------------------
EXPM1(numeric_exp<1>)
EXPM1(numeric_exp) <1>
--------------------------------------------------

*Input*:

@ -158,7 +158,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathExpm1Inline]

.Synopsis:
[source, sql]
--------------------------------------------------
FLOOR(numeric_exp<1>)
FLOOR(numeric_exp) <1>
--------------------------------------------------

*Input*:

@ -182,7 +182,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineFloor]

.Synopsis:
[source, sql]
--------------------------------------------------
LOG(numeric_exp<1>)
LOG(numeric_exp) <1>
--------------------------------------------------

*Input*:

@ -206,7 +206,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineLog]

.Synopsis:
[source, sql]
--------------------------------------------------
LOG10(numeric_exp<1>)
LOG10(numeric_exp) <1>
--------------------------------------------------

*Input*:

@ -252,7 +252,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathPINumber]

.Synopsis:
[source, sql]
--------------------------------------------------
POWER(numeric_exp<1>, integer_exp<2>)
POWER(
    numeric_exp, <1>
    integer_exp) <2>
--------------------------------------------------

*Input*:

@ -282,7 +284,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlinePowerNegative]

.Synopsis:
[source, sql]
--------------------------------------------------
RANDOM(seed<1>)
RANDOM(seed) <1>
--------------------------------------------------

*Input*:

@ -306,7 +308,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathRandom]

.Synopsis:
[source, sql]
----
ROUND(numeric_exp<1>[, integer_exp<2>])
ROUND(
    numeric_exp <1>
    [, integer_exp]) <2>
----
*Input*:

@ -337,7 +341,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathRoundWithNegativeParameter]

.Synopsis:
[source, sql]
--------------------------------------------------
SIGN(numeric_exp<1>)
SIGN(numeric_exp) <1>
--------------------------------------------------

*Input*:

@ -362,7 +366,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSign]

.Synopsis:
[source, sql]
--------------------------------------------------
SQRT(numeric_exp<1>)
SQRT(numeric_exp) <1>
--------------------------------------------------

*Input*:

@ -386,7 +390,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSqrt]

.Synopsis:
[source, sql]
----
TRUNCATE(numeric_exp<1>[, integer_exp<2>])
TRUNCATE(
    numeric_exp <1>
    [, integer_exp]) <2>
----
*Input*:
||||
|
||||
|
@ -421,7 +427,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathTruncateWithNegativeParameter
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ACOS(numeric_exp<1>)
|
||||
ACOS(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -445,7 +451,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAcos]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ASIN(numeric_exp<1>)
|
||||
ASIN(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -469,7 +475,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAsin]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ATAN(numeric_exp<1>)
|
||||
ATAN(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -493,7 +499,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAtan]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ATAN2(ordinate<1>, abscisa<2>)
|
||||
ATAN2(
|
||||
ordinate, <1>
|
||||
abscisa) <2>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -518,7 +526,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAtan2]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
COS(numeric_exp<1>)
|
||||
COS(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -542,7 +550,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCosine]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
COSH(numeric_exp<1>)
|
||||
COSH(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -566,7 +574,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCosh]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
COT(numeric_exp<1>)
|
||||
COT(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -590,7 +598,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCotangent]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
DEGREES(numeric_exp<1>)
|
||||
DEGREES(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -615,7 +623,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineDegrees]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
RADIANS(numeric_exp<1>)
|
||||
RADIANS(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -640,7 +648,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineRadians]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
SIN(numeric_exp<1>)
|
||||
SIN(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -664,7 +672,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSine]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
SINH(numeric_exp<1>)
|
||||
SINH(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -688,7 +696,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSinh]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
TAN(numeric_exp<1>)
|
||||
TAN(numeric_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
|
|
@ -14,7 +14,10 @@ such as `0` or `NULL`.
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
MATCH(field_exp<1>, constant_exp<2>[, options]<3>)
|
||||
MATCH(
|
||||
field_exp, <1>
|
||||
constant_exp <2>
|
||||
[, options]) <3>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -75,7 +78,9 @@ NOTE: The allowed optional parameters for a multi-field `MATCH()` variant (for t
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
QUERY(constant_exp<1>[, options]<2>)
|
||||
QUERY(
|
||||
constant_exp <1>
|
||||
[, options]) <2>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
|
|
@ -11,7 +11,7 @@ Functions for performing string manipulation.
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ASCII(string_exp<1>)
|
||||
ASCII(string_exp) <1>
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
@ -35,7 +35,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringAscii]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
BIT_LENGTH(string_exp<1>)
|
||||
BIT_LENGTH(string_exp) <1>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -58,7 +58,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringBitLength]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
CHAR(code<1>)
|
||||
CHAR(code) <1>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -81,7 +81,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringChar]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
CHAR_LENGTH(string_exp<1>)
|
||||
CHAR_LENGTH(string_exp) <1>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -104,7 +104,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringCharLength]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
CONCAT(string_exp1<1>,string_exp2<2>)
|
||||
CONCAT(
|
||||
string_exp1, <1>
|
||||
string_exp2) <2>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -128,7 +130,11 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringConcat]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
INSERT(source<1>, start<2>, length<3>, replacement<4>)
|
||||
INSERT(
|
||||
source, <1>
|
||||
start, <2>
|
||||
length, <3>
|
||||
replacement) <4>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -154,7 +160,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringInsert]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
LCASE(string_exp<1>)
|
||||
LCASE(string_exp) <1>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -177,7 +183,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringLCase]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
LEFT(string_exp<1>, count<2>)
|
||||
LEFT(
|
||||
string_exp, <1>
|
||||
count) <2>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -201,7 +209,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringLeft]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
LENGTH(string_exp<1>)
|
||||
LENGTH(string_exp) <1>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -224,7 +232,11 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringLength]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
LOCATE(pattern<1>, source<2>[, start]<3>)
|
||||
LOCATE(
|
||||
pattern, <1>
|
||||
source <2>
|
||||
[, start]<3>
|
||||
)
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -254,7 +266,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringLocateWithStart]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
LTRIM(string_exp<1>)
|
||||
LTRIM(string_exp) <1>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -277,7 +289,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringLTrim]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
OCTET_LENGTH(string_exp<1>)
|
||||
OCTET_LENGTH(string_exp) <1>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -300,7 +312,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringOctetLength]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
POSITION(string_exp1<1>, string_exp2<2>)
|
||||
POSITION(
|
||||
string_exp1, <1>
|
||||
string_exp2) <2>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -324,7 +338,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringPosition]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
REPEAT(string_exp<1>, count<2>)
|
||||
REPEAT(
|
||||
string_exp, <1>
|
||||
count) <2>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -348,7 +364,10 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringRepeat]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
REPLACE(source<1>, pattern<2>, replacement<3>)
|
||||
REPLACE(
|
||||
source, <1>
|
||||
pattern, <2>
|
||||
replacement) <3>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -373,7 +392,9 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringReplace]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
RIGHT(string_exp<1>, count<2>)
|
||||
RIGHT(
|
||||
string_exp, <1>
|
||||
count) <2>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -397,7 +418,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringRight]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
RTRIM(string_exp<1>)
|
||||
RTRIM(string_exp) <1>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -420,7 +441,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringRTrim]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
SPACE(count<1>)
|
||||
SPACE(count) <1>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -443,7 +464,10 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringSpace]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
SUBSTRING(source<1>, start<2>, length<3>)
|
||||
SUBSTRING(
|
||||
source, <1>
|
||||
start, <2>
|
||||
length) <3>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
@ -468,7 +492,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[stringSubString]
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
UCASE(string_exp<1>)
|
||||
UCASE(string_exp) <1>
|
||||
--------------------------------------------------
|
||||
*Input*:
|
||||
|
||||
|
|
|
@ -11,7 +11,9 @@ Functions for converting an expression of one data type to another.
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
----
|
||||
CAST(expression<1> AS data_type<2>)
|
||||
CAST(
|
||||
expression <1>
|
||||
AS data_type) <2>
|
||||
----
|
||||
|
||||
<1> Expression to cast
|
||||
|
@ -50,7 +52,9 @@ To obtain an {es} `float`, perform casting to its SQL equivalent, `real` type.
|
|||
.Synopsis:
|
||||
[source, sql]
|
||||
----
|
||||
CONVERT(expression<1>, data_type<2>)
|
||||
CONVERT(
|
||||
expression, <1>
|
||||
data_type) <2>
|
||||
----
|
||||
|
||||
<1> Expression to convert
|
||||
|
|
|
@ -6,7 +6,9 @@
|
|||
.Synopsis
|
||||
[source, sql]
|
||||
----
|
||||
DESCRIBE [table identifier<1> | [LIKE pattern<2>]]
|
||||
DESCRIBE
|
||||
[table identifier | <1>
|
||||
[LIKE pattern]] <2>
|
||||
----
|
||||
|
||||
<1> single table identifier or double quoted es multi index
|
||||
|
@ -16,7 +18,9 @@ or
|
|||
|
||||
[source, sql]
|
||||
----
|
||||
DESC [table identifier<1>|[LIKE pattern<2>]]
|
||||
DESC
|
||||
[table identifier | <1>
|
||||
[LIKE pattern]] <2>
|
||||
----
|
||||
|
||||
<1> single table identifier or double quoted es multi index
|
||||
|
|
|
@ -6,7 +6,9 @@
|
|||
.Synopsis
|
||||
[source, sql]
|
||||
----
|
||||
SHOW COLUMNS [ FROM | IN ]? [ table identifier<1> | [ LIKE pattern<2> ] ]
|
||||
SHOW COLUMNS [ FROM | IN ]?
|
||||
[table identifier | <1>
|
||||
[LIKE pattern] ] <2>
|
||||
----
|
||||
|
||||
<1> single table identifier or double quoted es multi index
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
.Synopsis
|
||||
[source, sql]
|
||||
----
|
||||
SHOW FUNCTIONS [ LIKE pattern<1>? ]?
|
||||
SHOW FUNCTIONS [LIKE pattern?]? <1>
|
||||
----
|
||||
|
||||
<1> SQL match pattern
|
||||
|
|
|
@ -6,7 +6,9 @@
|
|||
.Synopsis
|
||||
[source, sql]
|
||||
----
|
||||
SHOW TABLES [ table identifier<1> | [ LIKE pattern<2> ] ]?
|
||||
SHOW TABLES
|
||||
[table identifier | <1>
|
||||
[LIKE pattern ]]? <2>
|
||||
----
|
||||
|
||||
<1> single table identifier or double quoted es multi index
|
||||
|
|
|
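As a rough illustration of how the synopses documented in the hunks above are exercised, the sketch below posts one of the math functions to Elasticsearch's SQL REST endpoint. The host, the index name (`emp`), and the `salary` column are assumptions made purely for this example; they are not part of this change.

[source, java]
----
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SqlFunctionExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical cluster address and index; adjust to your environment.
        String body = "{\"query\": \"SELECT ROUND(salary, -2) AS rounded FROM emp LIMIT 5\"}";
        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:9200/_sql?format=txt"))
            .header("Content-Type", "application/json")
            .POST(HttpRequest.BodyPublishers.ofString(body))
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // tabular output when format=txt
    }
}
----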
@ -55,7 +55,6 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
|
||||
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
|
@ -107,7 +106,7 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Response
|
|||
return new Response();
|
||||
}
|
||||
|
||||
public static class Request extends SingleShardRequest<Request> implements ToXContent {
|
||||
public static class Request extends SingleShardRequest<Request> implements ToXContentObject {
|
||||
|
||||
private static final ParseField SCRIPT_FIELD = new ParseField("script");
|
||||
private static final ParseField CONTEXT_FIELD = new ParseField("context");
|
||||
|
|
|
@ -95,12 +95,6 @@ dependencies {
|
|||
es090 'org.elasticsearch:elasticsearch:0.90.13@zip'
|
||||
}
|
||||
|
||||
// Issue tracked in https://github.com/elastic/elasticsearch/issues/40904
|
||||
if (project.inFipsJvm) {
|
||||
testingConventions.enabled = false
|
||||
integTest.enabled = false
|
||||
}
|
||||
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
logger.warn("Disabling reindex-from-old tests because we can't get the pid file on windows")
|
||||
integTest.runner {
|
||||
|
|
|
@ -70,6 +70,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
}
|
||||
|
||||
public static final String NULL_VALUE = null;
|
||||
public static final int IGNORE_ABOVE = Integer.MAX_VALUE;
|
||||
}
|
||||
|
||||
public static final class CollationFieldType extends StringFieldType {
|
||||
|
@ -226,6 +227,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
private boolean numeric = false;
|
||||
private String variableTop = null;
|
||||
private boolean hiraganaQuaternaryMode = false;
|
||||
protected int ignoreAbove = Defaults.IGNORE_ABOVE;
|
||||
|
||||
public Builder(String name) {
|
||||
super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
|
||||
|
@ -247,6 +249,14 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
return super.indexOptions(indexOptions);
|
||||
}
|
||||
|
||||
public Builder ignoreAbove(int ignoreAbove) {
|
||||
if (ignoreAbove < 0) {
|
||||
throw new IllegalArgumentException("[ignore_above] must be positive, got " + ignoreAbove);
|
||||
}
|
||||
this.ignoreAbove = ignoreAbove;
|
||||
return this;
|
||||
}
|
||||
|
||||
public String rules() {
|
||||
return rules;
|
||||
}
|
||||
|
@ -458,7 +468,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
setupFieldType(context);
|
||||
return new ICUCollationKeywordFieldMapper(name, fieldType, defaultFieldType, context.indexSettings(),
|
||||
multiFieldsBuilder.build(this, context), copyTo, rules, language, country, variant, strength, decomposition,
|
||||
alternate, caseLevel, caseFirst, numeric, variableTop, hiraganaQuaternaryMode, collator);
|
||||
alternate, caseLevel, caseFirst, numeric, variableTop, hiraganaQuaternaryMode, ignoreAbove, collator);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -480,6 +490,10 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
builder.nullValue(fieldNode.toString());
|
||||
iterator.remove();
|
||||
break;
|
||||
case "ignore_above":
|
||||
builder.ignoreAbove(XContentMapValues.nodeIntegerValue(fieldNode, -1));
|
||||
iterator.remove();
|
||||
break;
|
||||
case "norms":
|
||||
builder.omitNorms(!XContentMapValues.nodeBooleanValue(fieldNode, "norms"));
|
||||
iterator.remove();
|
||||
|
@ -553,13 +567,15 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
private final boolean numeric;
|
||||
private final String variableTop;
|
||||
private final boolean hiraganaQuaternaryMode;
|
||||
private int ignoreAbove;
|
||||
private final Collator collator;
|
||||
|
||||
protected ICUCollationKeywordFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
|
||||
Settings indexSettings, MultiFields multiFields, CopyTo copyTo, String rules, String language,
|
||||
String country, String variant,
|
||||
String strength, String decomposition, String alternate, boolean caseLevel, String caseFirst,
|
||||
boolean numeric, String variableTop, boolean hiraganaQuaternaryMode, Collator collator) {
|
||||
boolean numeric, String variableTop, boolean hiraganaQuaternaryMode,
|
||||
int ignoreAbove, Collator collator) {
|
||||
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
|
||||
assert collator.isFrozen();
|
||||
this.rules = rules;
|
||||
|
@ -574,6 +590,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
this.numeric = numeric;
|
||||
this.variableTop = variableTop;
|
||||
this.hiraganaQuaternaryMode = hiraganaQuaternaryMode;
|
||||
this.ignoreAbove = ignoreAbove;
|
||||
this.collator = collator;
|
||||
}
|
||||
|
||||
|
@ -642,6 +659,8 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
conflicts.add("Cannot update hiragana_quaternary_mode setting for [" + CONTENT_TYPE + "]");
|
||||
}
|
||||
|
||||
this.ignoreAbove = icuMergeWith.ignoreAbove;
|
||||
|
||||
if (!conflicts.isEmpty()) {
|
||||
throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts);
|
||||
}
|
||||
|
@ -702,6 +721,10 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
if (includeDefaults || hiraganaQuaternaryMode) {
|
||||
builder.field("hiragana_quaternary_mode", hiraganaQuaternaryMode);
|
||||
}
|
||||
|
||||
if (includeDefaults || ignoreAbove != Defaults.IGNORE_ABOVE) {
|
||||
builder.field("ignore_above", ignoreAbove);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -718,7 +741,7 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
}
|
||||
}
|
||||
|
||||
if (value == null) {
|
||||
if (value == null || value.length() > ignoreAbove) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -403,4 +403,51 @@ public class ICUCollationKeywordFieldMapperTests extends ESSingleNodeTestCase {
|
|||
assertEquals("Can't merge because of conflicts: [Cannot update language setting for [" + FIELD_TYPE
|
||||
+ "], Cannot update strength setting for [" + FIELD_TYPE + "]]", e.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testIgnoreAbove() throws IOException {
|
||||
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("field").field("type", FIELD_TYPE)
|
||||
.field("ignore_above", 5).endObject().endObject()
|
||||
.endObject().endObject());
|
||||
|
||||
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
|
||||
|
||||
assertEquals(mapping, mapper.mappingSource().toString());
|
||||
|
||||
ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference
|
||||
.bytes(XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("field", "elk")
|
||||
.endObject()),
|
||||
XContentType.JSON));
|
||||
|
||||
IndexableField[] fields = doc.rootDoc().getFields("field");
|
||||
assertEquals(2, fields.length);
|
||||
|
||||
doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference
|
||||
.bytes(XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("field", "elasticsearch")
|
||||
.endObject()),
|
||||
XContentType.JSON));
|
||||
|
||||
fields = doc.rootDoc().getFields("field");
|
||||
assertEquals(0, fields.length);
|
||||
}
|
||||
|
||||
public void testUpdateIgnoreAbove() throws IOException {
|
||||
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("field").field("type", FIELD_TYPE).endObject().endObject()
|
||||
.endObject().endObject());
|
||||
|
||||
indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
|
||||
|
||||
mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("field").field("type", FIELD_TYPE)
|
||||
.field("ignore_above", 5).endObject().endObject()
|
||||
.endObject().endObject());
|
||||
indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
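A minimal sketch of the `ignore_above` behaviour added to the mapper above, assuming it mirrors the keyword field semantics shown in the diff: null values and values longer than the threshold simply produce no indexed field. The helper below is illustrative only and is not part of the mapper.

[source, java]
----
public class IgnoreAboveSketch {
    // Mirrors the `value == null || value.length() > ignoreAbove` check in parseCreateField.
    static boolean shouldIndex(String value, int ignoreAbove) {
        return value != null && value.length() <= ignoreAbove;
    }

    public static void main(String[] args) {
        System.out.println(shouldIndex("elk", 5));           // true  -> indexed
        System.out.println(shouldIndex("elasticsearch", 5)); // false -> skipped, as in the test
    }
}
----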
@ -20,11 +20,7 @@
|
|||
// only configure immediate children of plugins dir
|
||||
configure(subprojects.findAll { it.parent.path == project.path }) {
|
||||
group = 'org.elasticsearch.plugin'
|
||||
// TODO exclude some plugins as they require features not yet supported by testclusters
|
||||
if (false == name in ['repository-hdfs']) {
|
||||
apply plugin: 'elasticsearch.testclusters'
|
||||
}
|
||||
|
||||
apply plugin: 'elasticsearch.testclusters'
|
||||
apply plugin: 'elasticsearch.esplugin'
|
||||
|
||||
esplugin {
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
*/
|
||||
|
||||
import org.apache.tools.ant.taskdefs.condition.Os
|
||||
import org.elasticsearch.gradle.test.ClusterConfiguration
|
||||
import org.elasticsearch.gradle.test.RestIntegTestTask
|
||||
|
||||
import java.nio.file.Files
|
||||
|
@ -64,15 +63,17 @@ dependencies {
|
|||
compile "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}"
|
||||
|
||||
hdfsFixture project(':test:fixtures:hdfs-fixture')
|
||||
// Set the keytab files in the classpath so that we can access them from test code without the security manager
|
||||
// freaking out.
|
||||
testRuntime fileTree(dir: project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab").parent, include: ['*.keytab'])
|
||||
}
|
||||
|
||||
dependencyLicenses {
|
||||
mapping from: /hadoop-.*/, to: 'hadoop'
|
||||
}
|
||||
|
||||
|
||||
String realm = "BUILD.ELASTIC.CO"
|
||||
|
||||
String krb5conf = project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")
|
||||
|
||||
// Create HDFS File System Testing Fixtures for HA/Secure combinations
|
||||
for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) {
|
||||
|
@ -96,7 +97,6 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
|
|||
miniHDFSArgs.add('--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED')
|
||||
}
|
||||
}
|
||||
|
||||
// If it's an HA fixture, set a nameservice to use in the JVM options
|
||||
if (fixtureName.equals('haHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) {
|
||||
miniHDFSArgs.add("-Dha-nameservice=ha-hdfs")
|
||||
|
@ -110,8 +110,7 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
|
|||
if (fixtureName.equals('secureHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) {
|
||||
miniHDFSArgs.add("hdfs/hdfs.build.elastic.co@${realm}")
|
||||
miniHDFSArgs.add(
|
||||
project(':test:fixtures:krb5kdc-fixture')
|
||||
.ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")
|
||||
project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")
|
||||
)
|
||||
}
|
||||
|
||||
|
@ -119,67 +118,36 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture',
|
|||
}
|
||||
}
|
||||
|
||||
// The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks:
|
||||
project.afterEvaluate {
|
||||
for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
|
||||
ClusterConfiguration cluster = project.extensions.getByName("${integTestTaskName}Cluster") as ClusterConfiguration
|
||||
cluster.dependsOn(project.bundlePlugin)
|
||||
|
||||
Task restIntegTestTask = project.tasks.getByName(integTestTaskName)
|
||||
restIntegTestTask.clusterConfig.plugin(project.path)
|
||||
|
||||
// Default jvm arguments for all test clusters
|
||||
String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
|
||||
" " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
|
||||
" " + System.getProperty('tests.jvm.argline', '')
|
||||
|
||||
// If it's a secure cluster, add the keytab as an extra config, and set the krb5 conf in the JVM options.
|
||||
if (integTestTaskName.equals('integTestSecure') || integTestTaskName.equals('integTestSecureHa')) {
|
||||
String krb5conf = project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")
|
||||
restIntegTestTask.clusterConfig.extraConfigFile(
|
||||
"repository-hdfs/krb5.keytab",
|
||||
"${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}"
|
||||
)
|
||||
jvmArgs = jvmArgs + " " + "-Djava.security.krb5.conf=${krb5conf}"
|
||||
if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) {
|
||||
jvmArgs = jvmArgs + " " + '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED'
|
||||
}
|
||||
|
||||
// If it's the HA + Secure tests then also set the Kerberos settings for the integration test JVM since we'll
|
||||
// need to auth to HDFS to trigger namenode failovers.
|
||||
if (integTestTaskName.equals('integTestSecureHa')) {
|
||||
Task restIntegTestTaskRunner = project.tasks.getByName("${integTestTaskName}Runner")
|
||||
restIntegTestTaskRunner.systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
|
||||
restIntegTestTaskRunner.systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
|
||||
restIntegTestTaskRunner.jvmArgs "-Djava.security.krb5.conf=${krb5conf}"
|
||||
if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) {
|
||||
restIntegTestTaskRunner.jvmArgs '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED'
|
||||
}
|
||||
restIntegTestTaskRunner.systemProperty (
|
||||
for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) {
|
||||
task "${integTestTaskName}"(type: RestIntegTestTask) {
|
||||
description = "Runs rest tests against an elasticsearch cluster with HDFS."
|
||||
dependsOn(project.bundlePlugin)
|
||||
runner {
|
||||
if (integTestTaskName.contains("Secure")) {
|
||||
dependsOn secureHdfsFixture
|
||||
systemProperty "test.krb5.principal.es", "elasticsearch@${realm}"
|
||||
systemProperty "test.krb5.principal.hdfs", "hdfs/hdfs.build.elastic.co@${realm}"
|
||||
jvmArgs "-Djava.security.krb5.conf=${krb5conf}"
|
||||
systemProperty (
|
||||
"test.krb5.keytab.hdfs",
|
||||
project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab")
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
restIntegTestTask.clusterConfig.jvmArgs = jvmArgs
|
||||
testClusters."${integTestTaskName}" {
|
||||
plugin(file(bundlePlugin.archiveFile))
|
||||
if (integTestTaskName.contains("Secure")) {
|
||||
systemProperty "java.security.krb5.conf", krb5conf
|
||||
extraConfigFile(
|
||||
"repository-hdfs/krb5.keytab",
|
||||
file("${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}")
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create a Integration Test suite just for HA based tests
|
||||
RestIntegTestTask integTestHa = project.tasks.create('integTestHa', RestIntegTestTask.class) {
|
||||
description = "Runs rest tests against an elasticsearch cluster with HDFS configured with HA Namenode."
|
||||
}
|
||||
|
||||
// Create a Integration Test suite just for security based tests
|
||||
RestIntegTestTask integTestSecure = project.tasks.create('integTestSecure', RestIntegTestTask.class) {
|
||||
description = "Runs rest tests against an elasticsearch cluster with HDFS secured by MIT Kerberos."
|
||||
}
|
||||
|
||||
// Create a Integration Test suite just for HA related security based tests
|
||||
RestIntegTestTask integTestSecureHa = project.tasks.create('integTestSecureHa', RestIntegTestTask.class) {
|
||||
description = "Runs rest tests against an elasticsearch cluster with HDFS configured with HA Namenode and secured by MIT Kerberos."
|
||||
}
|
||||
|
||||
// Determine HDFS Fixture compatibility for the current build environment.
|
||||
boolean fixtureSupported = false
|
||||
|
@ -208,21 +176,27 @@ if (legalPath == false) {
|
|||
|
||||
// Always ignore HA integration tests in the normal integration test runner, they are included below as
|
||||
// part of their own HA-specific integration test tasks.
|
||||
integTestRunner.exclude('**/Ha*TestSuiteIT.class')
|
||||
integTest.runner {
|
||||
exclude('**/Ha*TestSuiteIT.class')
|
||||
}
|
||||
|
||||
if (fixtureSupported) {
|
||||
// Check depends on the HA test. Already depends on the standard test.
|
||||
project.check.dependsOn(integTestHa)
|
||||
|
||||
// Both standard and HA tests depend on their respective HDFS fixtures
|
||||
integTestCluster.dependsOn hdfsFixture
|
||||
integTestHaCluster.dependsOn haHdfsFixture
|
||||
integTest.dependsOn hdfsFixture
|
||||
integTestHa.dependsOn haHdfsFixture
|
||||
|
||||
// The normal test runner only runs the standard hdfs rest tests
|
||||
integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository'
|
||||
integTest.runner {
|
||||
systemProperty 'tests.rest.suite', 'hdfs_repository'
|
||||
}
|
||||
|
||||
// Only include the HA integration tests for the HA test task
|
||||
integTestHaRunner.setIncludes(['**/Ha*TestSuiteIT.class'])
|
||||
integTestHa.runner {
|
||||
setIncludes(['**/Ha*TestSuiteIT.class'])
|
||||
}
|
||||
} else {
|
||||
if (legalPath) {
|
||||
logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH")
|
||||
|
@ -231,29 +205,27 @@ if (fixtureSupported) {
|
|||
}
|
||||
|
||||
// The normal integration test runner will just test that the plugin loads
|
||||
integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository/10_basic'
|
||||
integTest.runner {
|
||||
systemProperty 'tests.rest.suite', 'hdfs_repository/10_basic'
|
||||
}
|
||||
// HA fixture is unsupported. Don't run them.
|
||||
integTestHa.setEnabled(false)
|
||||
}
|
||||
|
||||
check.dependsOn(integTestSecure, integTestSecureHa)
|
||||
|
||||
// Fixture dependencies
|
||||
integTestSecureCluster.dependsOn secureHdfsFixture
|
||||
integTestSecureHaCluster.dependsOn secureHaHdfsFixture
|
||||
|
||||
// Set the keytab files in the classpath so that we can access them from test code without the security manager
|
||||
// freaking out.
|
||||
project.dependencies {
|
||||
testRuntime fileTree(dir: project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab").parent, include: ['*.keytab'])
|
||||
}
|
||||
|
||||
// Run just the secure hdfs rest test suite.
|
||||
integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository'
|
||||
integTestSecure.runner {
|
||||
systemProperty 'tests.rest.suite', 'secure_hdfs_repository'
|
||||
}
|
||||
// Ignore HA integration Tests. They are included below as part of integTestSecureHa test runner.
|
||||
integTestSecureRunner.exclude('**/Ha*TestSuiteIT.class')
|
||||
integTestSecure.runner {
|
||||
exclude('**/Ha*TestSuiteIT.class')
|
||||
}
|
||||
// Only include the HA integration tests for the HA test task
|
||||
integTestSecureHaRunner.setIncludes(['**/Ha*TestSuiteIT.class'])
|
||||
integTestSecureHa.runner {
|
||||
setIncludes(['**/Ha*TestSuiteIT.class'])
|
||||
}
|
||||
|
||||
thirdPartyAudit {
|
||||
ignoreMissingClasses()
|
||||
|
|
|
@ -23,6 +23,7 @@ import com.amazonaws.AmazonClientException;
|
|||
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
|
||||
import com.amazonaws.services.s3.model.AmazonS3Exception;
|
||||
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
|
||||
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
|
||||
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
|
||||
import com.amazonaws.services.s3.model.ObjectListing;
|
||||
import com.amazonaws.services.s3.model.ObjectMetadata;
|
||||
|
@ -56,6 +57,12 @@ import static org.elasticsearch.repositories.s3.S3Repository.MIN_PART_SIZE_USING
|
|||
|
||||
class S3BlobContainer extends AbstractBlobContainer {
|
||||
|
||||
/**
|
||||
* Maximum number of deletes in a {@link DeleteObjectsRequest}.
|
||||
* @see <a href="https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html">S3 Documentation</a>.
|
||||
*/
|
||||
private static final int MAX_BULK_DELETES = 1000;
|
||||
|
||||
private final S3BlobStore blobStore;
|
||||
private final String keyPath;
|
||||
|
||||
|
@ -118,6 +125,51 @@ class S3BlobContainer extends AbstractBlobContainer {
|
|||
deleteBlobIgnoringIfNotExists(blobName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
|
||||
if (blobNames.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
try (AmazonS3Reference clientReference = blobStore.clientReference()) {
|
||||
// S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes
|
||||
final List<DeleteObjectsRequest> deleteRequests = new ArrayList<>();
|
||||
final List<String> partition = new ArrayList<>();
|
||||
for (String blob : blobNames) {
|
||||
partition.add(buildKey(blob));
|
||||
if (partition.size() == MAX_BULK_DELETES ) {
|
||||
deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
|
||||
partition.clear();
|
||||
}
|
||||
}
|
||||
if (partition.isEmpty() == false) {
|
||||
deleteRequests.add(bulkDelete(blobStore.bucket(), partition));
|
||||
}
|
||||
SocketAccess.doPrivilegedVoid(() -> {
|
||||
AmazonClientException aex = null;
|
||||
for (DeleteObjectsRequest deleteRequest : deleteRequests) {
|
||||
try {
|
||||
clientReference.client().deleteObjects(deleteRequest);
|
||||
} catch (AmazonClientException e) {
|
||||
if (aex == null) {
|
||||
aex = e;
|
||||
} else {
|
||||
aex.addSuppressed(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (aex != null) {
|
||||
throw aex;
|
||||
}
|
||||
});
|
||||
} catch (final AmazonClientException e) {
|
||||
throw new IOException("Exception when deleting blobs [" + blobNames + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
private static DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs) {
|
||||
return new DeleteObjectsRequest(bucket).withKeys(blobs.toArray(Strings.EMPTY_ARRAY)).withQuiet(true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void deleteBlobIgnoringIfNotExists(String blobName) throws IOException {
|
||||
try (AmazonS3Reference clientReference = blobStore.clientReference()) {
|
||||
|
|
|
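The 1,000-key partitioning inside `deleteBlobsIgnoringIfNotExists` can be hard to follow next to the privileged block, so here is a stripped-down sketch of just the batching step. It uses plain strings instead of `DeleteObjectsRequest`, and the constant simply mirrors `MAX_BULK_DELETES`; it is not the repository code itself.

[source, java]
----
import java.util.ArrayList;
import java.util.List;

public class BulkDeleteBatching {
    private static final int MAX_BULK_DELETES = 1000; // S3 limit per multi-object delete

    // Splits blob names into batches of at most MAX_BULK_DELETES entries, the same way
    // the blob container builds one delete request per partition.
    static List<List<String>> partition(List<String> blobNames) {
        List<List<String>> batches = new ArrayList<>();
        List<String> current = new ArrayList<>();
        for (String blob : blobNames) {
            current.add(blob);
            if (current.size() == MAX_BULK_DELETES) {
                batches.add(current);
                current = new ArrayList<>();
            }
        }
        if (current.isEmpty() == false) {
            batches.add(current);
        }
        return batches;
    }
}
----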
@ -324,7 +324,7 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
|
|||
// Delete Multiple Objects
|
||||
//
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
|
||||
handlers.insert(nonAuthPath(HttpPost.METHOD_NAME, "/"), (request) -> {
|
||||
final RequestHandler bulkDeleteHandler = request -> {
|
||||
final List<String> deletes = new ArrayList<>();
|
||||
final List<String> errors = new ArrayList<>();
|
||||
|
||||
|
@ -344,7 +344,6 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
|
|||
if (closingOffset != -1) {
|
||||
offset = offset + startMarker.length();
|
||||
final String objectName = requestBody.substring(offset, closingOffset);
|
||||
|
||||
boolean found = false;
|
||||
for (Bucket bucket : buckets.values()) {
|
||||
if (bucket.objects.containsKey(objectName)) {
|
||||
|
@ -369,7 +368,9 @@ public class AmazonS3Fixture extends AbstractHttpFixture {
|
|||
}
|
||||
}
|
||||
return newInternalError(request.getId(), "Something is wrong with this POST multiple deletes request");
|
||||
});
|
||||
};
|
||||
handlers.insert(nonAuthPath(HttpPost.METHOD_NAME, "/"), bulkDeleteHandler);
|
||||
handlers.insert(nonAuthPath(HttpPost.METHOD_NAME, "/{bucket}"), bulkDeleteHandler);
|
||||
|
||||
// non-authorized requests
|
||||
|
||||
|
|
|
@ -158,11 +158,7 @@ class MockAmazonS3 extends AbstractAmazonS3 {
|
|||
|
||||
final List<DeleteObjectsResult.DeletedObject> deletions = new ArrayList<>();
|
||||
for (DeleteObjectsRequest.KeyVersion key : request.getKeys()) {
|
||||
if (blobs.remove(key.getKey()) == null) {
|
||||
AmazonS3Exception exception = new AmazonS3Exception("[" + key + "] does not exist.");
|
||||
exception.setStatusCode(404);
|
||||
throw exception;
|
||||
} else {
|
||||
if (blobs.remove(key.getKey()) != null) {
|
||||
DeleteObjectsResult.DeletedObject deletion = new DeleteObjectsResult.DeletedObject();
|
||||
deletion.setKey(key.getKey());
|
||||
deletions.add(deletion);
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"ingest.delete_pipeline": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html",
|
||||
"methods": [ "DELETE" ],
|
||||
"url": {
|
||||
"path": "/_ingest/pipeline/{id}",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"ingest.get_pipeline": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html",
|
||||
"methods": [ "GET" ],
|
||||
"url": {
|
||||
"path": "/_ingest/pipeline/{id}",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"ingest.processor_grok": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/grok-processor.html#grok-processor-rest-get",
|
||||
"methods": [ "GET" ],
|
||||
"url": {
|
||||
"path": "/_ingest/processor/grok",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"ingest.put_pipeline": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html",
|
||||
"methods": [ "PUT" ],
|
||||
"url": {
|
||||
"path": "/_ingest/pipeline/{id}",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"ingest.simulate": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/plugins/master/ingest.html",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html",
|
||||
"methods": [ "GET", "POST" ],
|
||||
"url": {
|
||||
"path": "/_ingest/pipeline/_simulate",
|
||||
|
|
|
@ -18,6 +18,22 @@ setup:
|
|||
nested_long:
|
||||
type: long
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: other
|
||||
body:
|
||||
mappings:
|
||||
properties:
|
||||
date:
|
||||
type: date
|
||||
long:
|
||||
type: long
|
||||
nested:
|
||||
type: nested
|
||||
properties:
|
||||
nested_long:
|
||||
type: long
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: test
|
||||
|
@ -54,9 +70,15 @@ setup:
|
|||
id: 6
|
||||
body: { "date": "2017-10-21T07:00:00" }
|
||||
|
||||
- do:
|
||||
index:
|
||||
index: other
|
||||
id: 0
|
||||
body: { "date": "2017-10-20T03:08:45" }
|
||||
|
||||
- do:
|
||||
indices.refresh:
|
||||
index: [test]
|
||||
index: [test, other]
|
||||
|
||||
---
|
||||
"Simple Composite aggregation":
|
||||
|
@ -419,3 +441,84 @@ setup:
|
|||
- match: { aggregations.1.2.buckets.0.doc_count: 2 }
|
||||
- match: { aggregations.1.2.buckets.1.key.nested: 1000 }
|
||||
- match: { aggregations.1.2.buckets.1.doc_count: 1 }
|
||||
|
||||
---
|
||||
"Composite aggregation with unmapped field":
|
||||
- skip:
|
||||
version: " - 7.0.99"
|
||||
reason: starting in 7.1 the composite aggregation handles unmapped fields as keywords
|
||||
|
||||
- do:
|
||||
search:
|
||||
rest_total_hits_as_int: true
|
||||
index: [test, other]
|
||||
body:
|
||||
aggregations:
|
||||
test:
|
||||
composite:
|
||||
sources: [
|
||||
{
|
||||
"long": {
|
||||
"terms": {
|
||||
"field": "long"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kw": {
|
||||
"terms": {
|
||||
"field": "keyword"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
- match: {hits.total: 7}
|
||||
- length: { aggregations.test.buckets: 5 }
|
||||
- match: { aggregations.test.buckets.0.key.long: 0}
|
||||
- match: { aggregations.test.buckets.0.key.kw: "bar" }
|
||||
- match: { aggregations.test.buckets.0.doc_count: 2 }
|
||||
- match: { aggregations.test.buckets.1.key.long: 10 }
|
||||
- match: { aggregations.test.buckets.1.key.kw: "foo"}
|
||||
- match: { aggregations.test.buckets.1.doc_count: 1 }
|
||||
- match: { aggregations.test.buckets.2.key.long: 20 }
|
||||
- match: { aggregations.test.buckets.2.key.kw: "foo" }
|
||||
- match: { aggregations.test.buckets.2.doc_count: 1 }
|
||||
- match: { aggregations.test.buckets.3.key.long: 100}
|
||||
- match: { aggregations.test.buckets.3.key.kw: "bar" }
|
||||
- match: { aggregations.test.buckets.3.doc_count: 1 }
|
||||
- match: { aggregations.test.buckets.4.key.long: 1000 }
|
||||
- match: { aggregations.test.buckets.4.key.kw: "bar" }
|
||||
- match: { aggregations.test.buckets.4.doc_count: 1 }
|
||||
|
||||
- do:
|
||||
search:
|
||||
rest_total_hits_as_int: true
|
||||
index: [test, other]
|
||||
body:
|
||||
aggregations:
|
||||
test:
|
||||
composite:
|
||||
after: { "long": 100, "kw": "bar" }
|
||||
sources: [
|
||||
{
|
||||
"long": {
|
||||
"terms": {
|
||||
"field": "long"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"kw": {
|
||||
"terms": {
|
||||
"field": "keyword"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
- match: {hits.total: 7}
|
||||
- length: { aggregations.test.buckets: 1 }
|
||||
- match: { aggregations.test.buckets.0.key.long: 1000 }
|
||||
- match: { aggregations.test.buckets.0.key.kw: "bar" }
|
||||
- match: { aggregations.test.buckets.0.doc_count: 1 }
|
||||
|
|
|
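For readers following the YAML test above, the same two composite sources expressed through the Java aggregation builders look roughly like the sketch below. It assumes the usual signatures of `CompositeAggregationBuilder` and `TermsValuesSourceBuilder`; the source and field names merely match the test fixtures.

[source, java]
----
import java.util.Arrays;
import java.util.List;

import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeValuesSourceBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;

public class CompositeSourcesSketch {
    // Builds the "long" + "kw" composite used in the test; on indices where "keyword"
    // is unmapped, 7.1+ treats the source as a keyword per the skip reason above.
    static CompositeAggregationBuilder twoSourceComposite() {
        List<CompositeValuesSourceBuilder<?>> sources = Arrays.asList(
            new TermsValuesSourceBuilder("long").field("long"),
            new TermsValuesSourceBuilder("kw").field("keyword")
        );
        return new CompositeAggregationBuilder("test", sources);
    }
}
----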
@ -0,0 +1,100 @@
|
|||
setup:
|
||||
- skip:
|
||||
version: " - 7.99.99" #TODO change this after backport
|
||||
reason: These new error messages were added in 7.1
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: test_1
|
||||
body:
|
||||
settings:
|
||||
number_of_replicas: 0
|
||||
mappings:
|
||||
properties:
|
||||
int_field:
|
||||
type : integer
|
||||
|
||||
- do:
|
||||
bulk:
|
||||
refresh: true
|
||||
body:
|
||||
- index:
|
||||
_index: test_1
|
||||
_id: 1
|
||||
- int_field: 1
|
||||
- index:
|
||||
_index: test_1
|
||||
_id: 2
|
||||
- int_field: 2
|
||||
- index:
|
||||
_index: test_1
|
||||
_id: 3
|
||||
- int_field: 3
|
||||
- index:
|
||||
_index: test_1
|
||||
_id: 4
|
||||
- int_field: 4
|
||||
|
||||
---
|
||||
"Max pipeline through terms agg":
|
||||
|
||||
- do:
|
||||
catch: /\[Object\[\]\] at aggregation \[the_terms_2\]/
|
||||
search:
|
||||
rest_total_hits_as_int: true
|
||||
body:
|
||||
aggs:
|
||||
the_terms:
|
||||
terms:
|
||||
field: "int_field"
|
||||
aggs:
|
||||
the_terms_2:
|
||||
terms:
|
||||
field: "int_field"
|
||||
aggs:
|
||||
the_max:
|
||||
max:
|
||||
field: "int_field"
|
||||
the_bad_max:
|
||||
max_bucket:
|
||||
buckets_path: "the_terms>the_terms_2>the_max"
|
||||
|
||||
---
|
||||
"Max pipeline on terms agg":
|
||||
|
||||
- do:
|
||||
catch: /\[LongTerms\] at aggregation \[the_terms_2\]/
|
||||
search:
|
||||
rest_total_hits_as_int: true
|
||||
body:
|
||||
aggs:
|
||||
the_terms:
|
||||
terms:
|
||||
field: "int_field"
|
||||
aggs:
|
||||
the_terms_2:
|
||||
terms:
|
||||
field: "int_field"
|
||||
the_bad_max:
|
||||
max_bucket:
|
||||
buckets_path: "the_terms>the_terms_2"
|
||||
|
||||
---
|
||||
"Max pipeline on percentiles agg without specifying percent":
|
||||
|
||||
- do:
|
||||
catch: /buckets_path must reference either a number value or a single value numeric metric aggregation, but \[the_percentiles\] contains multiple values. Please specify which to use\./
|
||||
search:
|
||||
rest_total_hits_as_int: true
|
||||
body:
|
||||
aggs:
|
||||
the_terms:
|
||||
terms:
|
||||
field: "int_field"
|
||||
aggs:
|
||||
the_percentiles:
|
||||
percentiles:
|
||||
field: "int_field"
|
||||
the_bad_max:
|
||||
max_bucket:
|
||||
buckets_path: "the_terms>the_percentiles"
|
|
@ -113,23 +113,17 @@ public abstract class BlendedTermQuery extends Query {
|
|||
// TODO: Maybe it could also make sense to assume independent distributions of documents and eg. have:
|
||||
// df = df1 + df2 - (df1 * df2 / maxDoc)?
|
||||
max = Math.max(df, max);
|
||||
if (minSumTTF != -1 && ctx.totalTermFreq() != -1) {
|
||||
if (ctx.totalTermFreq() > 0) {
|
||||
// we need to find out the minimum sumTTF to adjust the statistics
|
||||
// otherwise the statistics don't match
|
||||
minSumTTF = Math.min(minSumTTF, reader.getSumTotalTermFreq(terms[i].field()));
|
||||
} else {
|
||||
minSumTTF = -1;
|
||||
}
|
||||
|
||||
}
|
||||
if (minSumTTF != -1 && maxDoc > minSumTTF) {
|
||||
maxDoc = (int)minSumTTF;
|
||||
}
|
||||
|
||||
if (max == 0) {
|
||||
return; // we are done that term doesn't exist at all
|
||||
}
|
||||
long sumTTF = minSumTTF == -1 ? -1 : 0;
|
||||
long sumTTF = 0;
|
||||
final int[] tieBreak = new int[contexts.length];
|
||||
for (int i = 0; i < tieBreak.length; ++i) {
|
||||
tieBreak[i] = i;
|
||||
|
@ -165,11 +159,7 @@ public abstract class BlendedTermQuery extends Query {
|
|||
}
|
||||
contexts[i] = ctx = adjustDF(reader.getContext(), ctx, Math.min(maxDoc, actualDf));
|
||||
prev = current;
|
||||
if (sumTTF >= 0 && ctx.totalTermFreq() >= 0) {
|
||||
sumTTF += ctx.totalTermFreq();
|
||||
} else {
|
||||
sumTTF = -1; // omit once TF is omitted anywhere!
|
||||
}
|
||||
sumTTF += ctx.totalTermFreq();
|
||||
}
|
||||
sumTTF = Math.min(sumTTF, minSumTTF);
|
||||
for (int i = 0; i < contexts.length; i++) {
|
||||
|
@ -177,17 +167,12 @@ public abstract class BlendedTermQuery extends Query {
|
|||
if (df == 0) {
|
||||
continue;
|
||||
}
|
||||
// the blended sumTTF can't be greater than the sumTTTF on the field
|
||||
final long fixedTTF = sumTTF == -1 ? -1 : sumTTF;
|
||||
contexts[i] = adjustTTF(reader.getContext(), contexts[i], fixedTTF);
|
||||
contexts[i] = adjustTTF(reader.getContext(), contexts[i], sumTTF);
|
||||
}
|
||||
}
|
||||
|
||||
private TermStates adjustTTF(IndexReaderContext readerContext, TermStates termContext, long sumTTF) throws IOException {
|
||||
assert termContext.wasBuiltFor(readerContext);
|
||||
if (sumTTF == -1 && termContext.totalTermFreq() == -1) {
|
||||
return termContext;
|
||||
}
|
||||
TermStates newTermContext = new TermStates(readerContext);
|
||||
List<LeafReaderContext> leaves = readerContext.leaves();
|
||||
final int len;
|
||||
|
@ -213,12 +198,7 @@ public abstract class BlendedTermQuery extends Query {
|
|||
private static TermStates adjustDF(IndexReaderContext readerContext, TermStates ctx, int newDocFreq) throws IOException {
|
||||
assert ctx.wasBuiltFor(readerContext);
|
||||
// Use a value of ttf that is consistent with the doc freq (ie. gte)
|
||||
long newTTF;
|
||||
if (ctx.totalTermFreq() < 0) {
|
||||
newTTF = -1;
|
||||
} else {
|
||||
newTTF = Math.max(ctx.totalTermFreq(), newDocFreq);
|
||||
}
|
||||
long newTTF = Math.max(ctx.totalTermFreq(), newDocFreq);
|
||||
List<LeafReaderContext> leaves = readerContext.leaves();
|
||||
final int len;
|
||||
if (leaves == null) {
|
||||
|
|
|
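The `BlendedTermQuery` simplification above appears to rely on newer Lucene always reporting `totalTermFreq()` (at least equal to the doc freq), which is why the old `-1` "statistic omitted" branches could be dropped. A toy version of the remaining arithmetic, with plain arrays standing in for the term contexts, is sketched below; it is an illustration, not the query class itself.

[source, java]
----
public class BlendedTtfSketch {
    // Sums the per-context total term frequencies and clamps the result to the smallest
    // sumTotalTermFreq seen across the blended fields, matching the updated blend() logic.
    static long blendedTtf(long[] totalTermFreqs, long minSumTtf) {
        long sum = 0;
        for (long ttf : totalTermFreqs) {
            sum += ttf;
        }
        return Math.min(sum, minSumTtf);
    }
}
----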
@ -21,7 +21,7 @@ package org.elasticsearch.action;
|
|||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.util.Objects;
|
||||
|
@ -30,7 +30,7 @@ import java.util.Objects;
|
|||
* An exception indicating that a failure occurred performing an operation on the shard.
|
||||
*
|
||||
*/
|
||||
public abstract class ShardOperationFailedException implements Streamable, ToXContent {
|
||||
public abstract class ShardOperationFailedException implements Streamable, ToXContentObject {
|
||||
|
||||
protected String index;
|
||||
protected int shardId = -1;
|
||||
|
|
|
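Switching `ShardOperationFailedException` from `ToXContent` to `ToXContentObject` means each failure renders as a self-contained JSON object rather than a fragment spliced into a caller-opened object. A hedged sketch of that contract follows; it reuses only the interfaces imported in this patch, and the class and fields are invented for illustration.

[source, java]
----
import java.io.IOException;

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

public class ExampleFailure implements ToXContentObject {
    private final String index;
    private final int shardId;

    public ExampleFailure(String index, int shardId) {
        this.index = index;
        this.shardId = shardId;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        builder.startObject();   // a ToXContentObject opens and closes its own object...
        builder.field("index", index);
        builder.field("shard", shardId);
        builder.endObject();     // ...so callers no longer wrap it, as seen in the Failure change below
        return builder;
    }
}
----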
@ -25,7 +25,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest;
|
|||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
|
@ -36,7 +36,7 @@ import java.util.Objects;
|
|||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptRequest> implements ToXContent {
|
||||
public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptRequest> implements ToXContentFragment {
|
||||
|
||||
private String id;
|
||||
private String context;
|
||||
|
|
|
@ -101,8 +101,8 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA
|
|||
|
||||
private void executeShardOperation(final ShardRequest request, final IndexShard indexShard) {
|
||||
final ShardId shardId = indexShard.shardId();
|
||||
if (indexShard.getActiveOperationsCount() != 0) {
|
||||
throw new IllegalStateException("On-going operations in progress while checking index shard " + shardId + " before closing");
|
||||
if (indexShard.getActiveOperationsCount() != IndexShard.OPERATIONS_BLOCKED) {
|
||||
throw new IllegalStateException("Index shard " + shardId + " is not blocking all operations during closing");
|
||||
}
|
||||
|
||||
final ClusterBlocks clusterBlocks = clusterService.state().blocks();
|
||||
|
|
|
@ -120,7 +120,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
|
|||
final String rolloverIndexName = indexNameExpressionResolver.resolveDateMathExpression(unresolvedName);
|
||||
MetaDataCreateIndexService.validateIndexName(rolloverIndexName, state); // will fail if the index already exists
|
||||
checkNoDuplicatedAliasInIndexTemplate(metaData, rolloverIndexName, rolloverRequest.getAlias());
|
||||
client.admin().indices().prepareStats(sourceIndexName).clear().setDocs(true).execute(
|
||||
client.admin().indices().prepareStats(rolloverRequest.getAlias()).clear().setDocs(true).execute(
|
||||
new ActionListener<IndicesStatsResponse>() {
|
||||
@Override
|
||||
public void onResponse(IndicesStatsResponse statsResponse) {
|
||||
|
@ -249,7 +249,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
|
|||
|
||||
static Map<String, Boolean> evaluateConditions(final Collection<Condition<?>> conditions, final IndexMetaData metaData,
|
||||
final IndicesStatsResponse statsResponse) {
|
||||
return evaluateConditions(conditions, statsResponse.getPrimaries().getDocs(), metaData);
|
||||
return evaluateConditions(conditions, statsResponse.getIndex(metaData.getIndex().getName()).getPrimaries().getDocs(), metaData);
|
||||
}
|
||||
|
||||
static void validate(MetaData metaData, RolloverRequest request) {
|
||||
|
|
|
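The rollover change above matters because the stats request is now issued against the alias, so the response may span several indices; the condition check therefore has to pick out the primaries of the concrete source index by name. A simplified stand-in using a plain map is shown below; the types are placeholders, not the real stats classes.

[source, java]
----
import java.util.Map;

public class RolloverConditionSketch {
    // Looks up the primary doc count of the concrete rollover source index inside a
    // stats response that may cover every index behind the alias.
    static boolean maxDocsReached(Map<String, Long> primaryDocsByIndex, String sourceIndex, long maxDocs) {
        Long docs = primaryDocsByIndex.get(sourceIndex);
        return docs != null && docs >= maxDocs; // null: no stats yet for that index
    }
}
----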
@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.indices.shards;
|
|||
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
|
@ -267,8 +268,10 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
|
|||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field("node", nodeId());
|
||||
super.toXContent(builder, params);
|
||||
super.innerToXContent(builder, params);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
@ -361,9 +364,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
|
|||
if (failures.size() > 0) {
|
||||
builder.startArray(Fields.FAILURES);
|
||||
for (Failure failure : failures) {
|
||||
builder.startObject();
|
||||
failure.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endArray();
|
||||
}
|
||||
|
|
|
@ -108,4 +108,24 @@ public class IndexStats implements Iterable<IndexShardStats> {
|
|||
primary = stats;
|
||||
return stats;
|
||||
}
|
||||
|
||||
public static class IndexStatsBuilder {
|
||||
private final String indexName;
|
||||
private final String uuid;
|
||||
private final List<ShardStats> shards = new ArrayList<>();
|
||||
|
||||
public IndexStatsBuilder(String indexName, String uuid) {
|
||||
this.indexName = indexName;
|
||||
this.uuid = uuid;
|
||||
}
|
||||
|
||||
public IndexStatsBuilder add(ShardStats shardStats) {
|
||||
shards.add(shardStats);
|
||||
return this;
|
||||
}
|
||||
|
||||
public IndexStats build() {
|
||||
return new IndexStats(indexName, uuid, shards.toArray(new ShardStats[shards.size()]));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.indices.stats;

import org.elasticsearch.action.admin.indices.stats.IndexStats.IndexStatsBuilder;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.cluster.routing.ShardRouting;

@@ -29,12 +30,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import static java.util.Collections.unmodifiableMap;

@@ -83,26 +82,17 @@ public class IndicesStatsResponse extends BroadcastResponse {
        if (indicesStats != null) {
            return indicesStats;
        }
        Map<String, IndexStats> indicesStats = new HashMap<>();

        Set<Index> indices = new HashSet<>();
        final Map<String, IndexStatsBuilder> indexToIndexStatsBuilder = new HashMap<>();
        for (ShardStats shard : shards) {
            indices.add(shard.getShardRouting().index());
            Index index = shard.getShardRouting().index();
            IndexStatsBuilder indexStatsBuilder = indexToIndexStatsBuilder.computeIfAbsent(index.getName(),
                k -> new IndexStatsBuilder(k, index.getUUID()));
            indexStatsBuilder.add(shard);
        }

        for (Index index : indices) {
            List<ShardStats> shards = new ArrayList<>();
            String indexName = index.getName();
            for (ShardStats shard : this.shards) {
                if (shard.getShardRouting().getIndexName().equals(indexName)) {
                    shards.add(shard);
                }
            }
            indicesStats.put(
                indexName, new IndexStats(indexName, index.getUUID(), shards.toArray(new ShardStats[shards.size()]))
            );
        }
        this.indicesStats = indicesStats;
        indicesStats = indexToIndexStatsBuilder.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().build()));
        return indicesStats;
    }

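The rewritten getIndices() above replaces the nested scan with a single grouping pass: one builder per index, created lazily with computeIfAbsent, then materialized with Collectors.toMap. As a rough, self-contained illustration of that same pattern (all names below are invented for the example and are not from this commit):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupByExample {
    // Group words by their first letter in a single pass over the input.
    public static Map<String, List<String>> groupByFirstLetter(List<String> words) {
        Map<String, List<String>> accumulators = new HashMap<>();
        for (String word : words) {
            // one accumulator per key, created lazily on first use
            accumulators.computeIfAbsent(word.substring(0, 1), k -> new ArrayList<>()).add(word);
        }
        // materialize the accumulators into the final map
        return accumulators.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }
}
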
@@ -39,7 +39,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;

@@ -60,14 +60,14 @@ import java.util.Set;
import java.util.stream.Collectors;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;

/**
 * A request to create an index template.
 */
public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateRequest> implements IndicesRequest, ToXContent {
public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateRequest> implements IndicesRequest, ToXContentObject {

    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(PutIndexTemplateRequest.class));

@@ -519,32 +519,35 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field("index_patterns", indexPatterns);
        builder.field("order", order);
        if (version != null) {
            builder.field("version", version);
        }

        builder.startObject("settings");
        settings.toXContent(builder, params);
        builder.endObject();

        builder.startObject("mappings");
        for (Map.Entry<String, String> entry : mappings.entrySet()) {
            builder.field(entry.getKey());
            try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY,
                DeprecationHandler.THROW_UNSUPPORTED_OPERATION, entry.getValue())) {
                builder.copyCurrentStructure(parser);
        builder.startObject();
        {
            builder.field("index_patterns", indexPatterns);
            builder.field("order", order);
            if (version != null) {
                builder.field("version", version);
            }

            builder.startObject("settings");
            settings.toXContent(builder, params);
            builder.endObject();

            builder.startObject("mappings");
            for (Map.Entry<String, String> entry : mappings.entrySet()) {
                builder.field(entry.getKey());
                try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY,
                    DeprecationHandler.THROW_UNSUPPORTED_OPERATION, entry.getValue())) {
                    builder.copyCurrentStructure(parser);
                }
            }
            builder.endObject();

            builder.startObject("aliases");
            for (Alias alias : aliases) {
                alias.toXContent(builder, params);
            }
            builder.endObject();
        }
        builder.endObject();

        builder.startObject("aliases");
        for (Alias alias : aliases) {
            alias.toXContent(builder, params);
        }
        builder.endObject();

        return builder;
    }
}

@@ -117,7 +117,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
                               TransportShardBulkAction shardBulkAction, NodeClient client,
                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                               AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) {
        super(BulkAction.NAME, transportService, actionFilters, (Supplier<BulkRequest>) BulkRequest::new);
        super(BulkAction.NAME, transportService, actionFilters, (Supplier<BulkRequest>) BulkRequest::new, ThreadPool.Names.WRITE);
        Objects.requireNonNull(relativeTimeProvider);
        this.threadPool = threadPool;
        this.clusterService = clusterService;

@@ -258,7 +258,8 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
            @Override
            public void onResponse(CreateIndexResponse result) {
                if (counter.decrementAndGet() == 0) {
                    executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
                    threadPool.executor(ThreadPool.Names.WRITE).execute(
                        () -> executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated));
                }
            }

@@ -140,9 +140,7 @@ public class SearchPhaseExecutionException extends ElasticsearchException {
            builder.startArray();
            ShardOperationFailedException[] failures = ExceptionsHelper.groupBy(shardFailures);
            for (ShardOperationFailedException failure : failures) {
                builder.startObject();
                failure.toXContent(builder, params);
                builder.endObject();
            }
            builder.endArray();
        }

@@ -30,7 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;

@@ -408,7 +408,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
     * Holds info about the clusters that the search was executed on: how many in total, how many of them were successful
     * and how many of them were skipped.
     */
    public static class Clusters implements ToXContent, Writeable {
    public static class Clusters implements ToXContentFragment, Writeable {

        public static final Clusters EMPTY = new Clusters(0, 0, 0);

@@ -118,14 +118,18 @@ public class ShardSearchFailure extends ShardOperationFailedException {

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field(SHARD_FIELD, shardId());
        builder.field(INDEX_FIELD, index());
        if (shardTarget != null) {
            builder.field(NODE_FIELD, shardTarget.getNodeId());
        }
        builder.field(REASON_FIELD);
        builder.startObject();
        ElasticsearchException.generateThrowableXContent(builder, params, cause);
        {
            builder.field(SHARD_FIELD, shardId());
            builder.field(INDEX_FIELD, index());
            if (shardTarget != null) {
                builder.field(NODE_FIELD, shardTarget.getNodeId());
            }
            builder.field(REASON_FIELD);
            builder.startObject();
            ElasticsearchException.generateThrowableXContent(builder, params, cause);
            builder.endObject();
        }
        builder.endObject();
        return builder;
    }

@@ -91,6 +91,13 @@ public class DefaultShardOperationFailedException extends ShardOperationFailedEx

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        innerToXContent(builder, params);
        builder.endObject();
        return builder;
    }

    protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
        builder.field("shard", shardId());
        builder.field("index", index());
        builder.field("status", status.name());

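The toXContent/innerToXContent split above follows a wrap-then-delegate shape: the public method owns the enclosing object, while the protected hook only emits fields, so subclasses can contribute extra fields without re-opening the object. A self-contained sketch of the same shape with invented names (a plain StringBuilder stands in for XContentBuilder; nothing here is ES API):

abstract class JsonObjectWriter {
    // Owns the surrounding braces; subclasses never open or close the object themselves.
    final String toJson() {
        StringBuilder sb = new StringBuilder("{");
        innerToJson(sb);              // subclasses contribute the fields
        return sb.append("}").toString();
    }

    protected abstract void innerToJson(StringBuilder sb);
}

class ShardFailureJson extends JsonObjectWriter {
    @Override
    protected void innerToJson(StringBuilder sb) {
        // Hard-coded values purely for illustration.
        sb.append("\"shard\":0,\"index\":\"test\",\"status\":\"INTERNAL_SERVER_ERROR\"");
    }
}
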
@@ -18,7 +18,6 @@
 */
package org.elasticsearch.action.support;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.Writeable;

@@ -57,6 +56,13 @@ public abstract class HandledTransportAction<Request extends ActionRequest, Resp
            new TransportHandler());
    }

    protected HandledTransportAction(String actionName, TransportService transportService, ActionFilters actionFilters,
                                     Supplier<Request> request, String executor) {
        super(actionName, actionFilters, transportService.getTaskManager());
        transportService.registerRequestHandler(actionName, request, executor, false, true,
            new TransportHandler());
    }

    protected HandledTransportAction(String actionName, boolean canTripCircuitBreaker,
                                     TransportService transportService, ActionFilters actionFilters,
                                     Writeable.Reader<Request> requestReader) {

@@ -73,9 +79,8 @@ public abstract class HandledTransportAction<Request extends ActionRequest, Resp

    class TransportHandler implements TransportRequestHandler<Request> {
        @Override
        public final void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception {
        public final void messageReceived(final Request request, final TransportChannel channel, Task task) {
            // We already got the task created on the network layer - no need to create it again on the transport layer
            Logger logger = HandledTransportAction.this.logger;
            execute(task, request, new ChannelActionListener<>(channel, actionName, request));
        }
    }

@@ -74,7 +74,7 @@ public class ReplicationOperation<
    private final long primaryTerm;

    // exposed for tests
    final ActionListener<PrimaryResultT> resultListener;
    private final ActionListener<PrimaryResultT> resultListener;

    private volatile PrimaryResultT primaryResult = null;

@@ -358,37 +358,35 @@ public abstract class TransportReplicationAction<
            });
        } else {
            setPhase(replicationTask, "primary");
            createReplicatedOperation(primaryRequest.getRequest(),
                ActionListener.wrap(result -> result.respond(
                    new ActionListener<Response>() {
                        @Override
                        public void onResponse(Response response) {
                            if (syncGlobalCheckpointAfterOperation) {
                                final IndexShard shard = primaryShardReference.indexShard;
                                try {
                                    shard.maybeSyncGlobalCheckpoint("post-operation");
                                } catch (final Exception e) {
                                    // only log non-closed exceptions
                                    if (ExceptionsHelper.unwrap(
                                        e, AlreadyClosedException.class, IndexShardClosedException.class) == null) {
                                        // intentionally swallow, a missed global checkpoint sync should not fail this operation
                                        logger.info(
                                            new ParameterizedMessage(
                                                "{} failed to execute post-operation global checkpoint sync", shard.shardId()), e);
                                    }
                                }
                            }
                            primaryShardReference.close(); // release shard operation lock before responding to caller
                            setPhase(replicationTask, "finished");
                            onCompletionListener.onResponse(response);
                        }

                        @Override
                        public void onFailure(Exception e) {
                            handleException(primaryShardReference, e);
            final ActionListener<Response> referenceClosingListener = ActionListener.wrap(response -> {
                primaryShardReference.close(); // release shard operation lock before responding to caller
                setPhase(replicationTask, "finished");
                onCompletionListener.onResponse(response);
            }, e -> handleException(primaryShardReference, e));

            final ActionListener<Response> globalCheckpointSyncingListener = ActionListener.wrap(response -> {
                if (syncGlobalCheckpointAfterOperation) {
                    final IndexShard shard = primaryShardReference.indexShard;
                    try {
                        shard.maybeSyncGlobalCheckpoint("post-operation");
                    } catch (final Exception e) {
                        // only log non-closed exceptions
                        if (ExceptionsHelper.unwrap(
                            e, AlreadyClosedException.class, IndexShardClosedException.class) == null) {
                            // intentionally swallow, a missed global checkpoint sync should not fail this operation
                            logger.info(
                                new ParameterizedMessage(
                                    "{} failed to execute post-operation global checkpoint sync", shard.shardId()), e);
                        }
                    }), e -> handleException(primaryShardReference, e)
                    ), primaryShardReference).execute();
                    }
                }
                referenceClosingListener.onResponse(response);
            }, referenceClosingListener::onFailure);

            new ReplicationOperation<>(primaryRequest.getRequest(), primaryShardReference,
                ActionListener.wrap(result -> result.respond(globalCheckpointSyncingListener), referenceClosingListener::onFailure),
                newReplicasProxy(), logger, actionName, primaryRequest.getPrimaryTerm()).execute();
        }
    } catch (Exception e) {
        handleException(primaryShardReference, e);

@@ -406,12 +404,6 @@ public abstract class TransportReplicationAction<
        onCompletionListener.onFailure(e);
    }

    protected ReplicationOperation<Request, ReplicaRequest, PrimaryResult<ReplicaRequest, Response>> createReplicatedOperation(
        Request request, ActionListener<PrimaryResult<ReplicaRequest, Response>> listener,
        PrimaryShardReference primaryShardReference) {
        return new ReplicationOperation<>(request, primaryShardReference, listener,
            newReplicasProxy(), logger, actionName, primaryRequest.getPrimaryTerm());
    }
    }

    public static class PrimaryResult<ReplicaRequest extends ReplicationRequest<ReplicaRequest>,

@@ -522,6 +514,7 @@ public abstract class TransportReplicationAction<
        @Override
        public void onResponse(Releasable releasable) {
            try {
                assert replica.getActiveOperationsCount() != 0 : "must perform shard operation under a permit";
                final ReplicaResult replicaResult = shardOperationOnReplica(replicaRequest.getRequest(), replica);
                releasable.close(); // release shard operation lock before responding to caller
                final TransportReplicationAction.ReplicaResponse response =

@@ -921,6 +914,7 @@ public abstract class TransportReplicationAction<
                return result;
            });
        }
        assert indexShard.getActiveOperationsCount() != 0 : "must perform shard operation under a permit";
        shardOperationOnPrimary(request, indexShard, listener);
    }

@@ -61,6 +61,7 @@ import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

/**
 * Internal startup code.

@@ -185,8 +186,15 @@ final class Bootstrap {
                    IOUtils.close(node, spawner);
                    LoggerContext context = (LoggerContext) LogManager.getContext(false);
                    Configurator.shutdown(context);
                    if (node != null && node.awaitClose(10, TimeUnit.SECONDS) == false) {
                        throw new IllegalStateException("Node didn't stop within 10 seconds. " +
                            "Any outstanding requests or tasks might get killed.");
                    }
                } catch (IOException ex) {
                    throw new ElasticsearchException("failed to stop node", ex);
                } catch (InterruptedException e) {
                    LogManager.getLogger(Bootstrap.class).warn("Thread got interrupted while waiting for the node to shutdown.");
                    Thread.currentThread().interrupt();
                }
            }
        });

@@ -269,6 +277,12 @@ final class Bootstrap {
    static void stop() throws IOException {
        try {
            IOUtils.close(INSTANCE.node, INSTANCE.spawner);
            if (INSTANCE.node != null && INSTANCE.node.awaitClose(10, TimeUnit.SECONDS) == false) {
                throw new IllegalStateException("Node didn't stop within 10 seconds. Any outstanding requests or tasks might get killed.");
            }
        } catch (InterruptedException e) {
            LogManager.getLogger(Bootstrap.class).warn("Thread got interrupted while waiting for the node to shutdown.");
            Thread.currentThread().interrupt();
        } finally {
            INSTANCE.keepAliveLatch.countDown();
        }

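The bounded wait added above (close, give the node a fixed window to finish, then fail rather than hang) mirrors the standard executor-shutdown idiom from java.util.concurrent. A minimal standalone sketch of that idiom follows; the thread pool, its size and the 10-second figure are example values only, not taken from this commit:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitStopExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.shutdown();                                          // ask for an orderly stop
        if (pool.awaitTermination(10, TimeUnit.SECONDS) == false) {
            // same shape as the node shutdown above: give up after a timeout instead of waiting forever
            throw new IllegalStateException("Didn't stop within 10 seconds; outstanding tasks may be killed.");
        }
    }
}
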
@@ -153,6 +153,14 @@ public class JoinHelper {
        transportService.registerRequestHandler(MembershipAction.DISCOVERY_JOIN_VALIDATE_ACTION_NAME,
            ValidateJoinRequest::new, ThreadPool.Names.GENERIC,
            (request, channel, task) -> {
                final ClusterState localState = currentStateSupplier.get();
                if (localState.metaData().clusterUUIDCommitted() &&
                    localState.metaData().clusterUUID().equals(request.getState().metaData().clusterUUID()) == false) {
                    throw new CoordinationStateRejectedException("mixed-version cluster join validation on cluster state" +
                        " with a different cluster uuid " + request.getState().metaData().clusterUUID() +
                        " than local cluster uuid " + localState.metaData().clusterUUID()
                        + ", rejecting");
                }
                joinValidators.forEach(action -> action.accept(transportService.getLocalNode(), request.getState()));
                channel.sendResponse(Empty.INSTANCE);
            });

@@ -88,7 +88,11 @@ public class MetaDataIndexUpgradeService {
    public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {
        // Throws an exception if there are too-old segments:
        if (isUpgraded(indexMetaData)) {
            return indexMetaData;
            /*
             * We still need to check for broken index settings since it might be that a user removed a plugin that registers a setting
             * needed by this index.
             */
            return archiveBrokenIndexSettings(indexMetaData);
        }
        checkSupportedVersion(indexMetaData, minimumIndexCompatibilityVersion);
        IndexMetaData newMetaData = indexMetaData;

@@ -24,6 +24,8 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

@@ -150,7 +152,7 @@ public abstract class Decision implements ToXContent, Writeable {
    /**
     * Simple class representing a single decision
     */
    public static class Single extends Decision {
    public static class Single extends Decision implements ToXContentObject {
        private Type type;
        private String label;
        private String explanation;

@@ -269,7 +271,7 @@ public abstract class Decision implements ToXContent, Writeable {
    /**
     * Simple class representing a list of decisions
     */
    public static class Multi extends Decision {
    public static class Multi extends Decision implements ToXContentFragment {

        private final List<Decision> decisions = new ArrayList<>();

@@ -23,6 +23,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;
import java.util.List;
import java.util.Map;

/**

@@ -96,8 +97,9 @@ public interface BlobContainer {
     * @throws IOException if the input stream could not be read, or the target blob could not be written to.
     */
    void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException;

    /**
     * Deletes a blob with giving name, if the blob exists. If the blob does not exist,
     * Deletes the blob with the given name, if the blob exists. If the blob does not exist,
     * this method throws a NoSuchFileException.
     *
     * @param blobName

@@ -107,6 +109,33 @@ public interface BlobContainer {
     */
    void deleteBlob(String blobName) throws IOException;

    /**
     * Deletes the blobs with given names. Unlike {@link #deleteBlob(String)} this method will not throw an exception
     * when one or multiple of the given blobs don't exist and simply ignore this case.
     *
     * @param blobNames The names of the blob to delete.
     * @throws IOException if a subset of blob exists but could not be deleted.
     */
    default void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException {
        IOException ioe = null;
        for (String blobName : blobNames) {
            try {
                deleteBlob(blobName);
            } catch (NoSuchFileException e) {
                // ignored
            } catch (IOException e) {
                if (ioe == null) {
                    ioe = e;
                } else {
                    ioe.addSuppressed(e);
                }
            }
        }
        if (ioe != null) {
            throw ioe;
        }
    }

    /**
     * Deletes a blob with giving name, ignoring if the blob does not exist.
     *

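A hypothetical caller of the new deleteBlobsIgnoringIfNotExists default method could look like the sketch below. The class, method and blob names are invented for illustration; only the BlobContainer interface shown above plus java.io.IOException and java.util.Arrays are assumed.

class StaleBlobCleanup {
    // Delete a fixed set of blobs, silently skipping any that are already gone.
    static void deleteKnownStaleBlobs(BlobContainer container) throws java.io.IOException {
        container.deleteBlobsIgnoringIfNotExists(java.util.Arrays.asList("snap-1.dat", "meta-1.dat"));
    }
}
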
@@ -40,10 +40,10 @@ public class FsBlobStore implements BlobStore {

    private final boolean readOnly;

    public FsBlobStore(Settings settings, Path path) throws IOException {
    public FsBlobStore(Settings settings, Path path, boolean readonly) throws IOException {
        this.path = path;
        this.readOnly = settings.getAsBoolean("readonly", false);
        if (!this.readOnly) {
        this.readOnly = readonly;
        if (this.readOnly == false) {
            Files.createDirectories(path);
        }
        this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size",

@@ -74,6 +74,11 @@ public class FsBlobStore implements BlobStore {

    @Override
    public void delete(BlobPath path) throws IOException {
        assert readOnly == false : "should not delete anything from a readonly repository: " + path;
        //noinspection ConstantConditions in case assertions are disabled
        if (readOnly) {
            throw new ElasticsearchException("unexpectedly deleting [" + path + "] from a readonly repository");
        }
        IOUtils.rm(buildPath(path));
    }

@@ -84,7 +89,7 @@ public class FsBlobStore implements BlobStore {

    private synchronized Path buildAndCreate(BlobPath path) throws IOException {
        Path f = buildPath(path);
        if (!readOnly) {
        if (readOnly == false) {
            Files.createDirectories(f);
        }
        return f;

@@ -19,15 +19,12 @@

package org.elasticsearch.common.component;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public abstract class AbstractLifecycleComponent implements LifecycleComponent {
    private static final Logger logger = LogManager.getLogger(AbstractLifecycleComponent.class);

    protected final Lifecycle lifecycle = new Lifecycle();

@@ -52,16 +49,18 @@ public abstract class AbstractLifecycleComponent implements LifecycleComponent {

    @Override
    public void start() {
        if (!lifecycle.canMoveToStarted()) {
            return;
        }
        for (LifecycleListener listener : listeners) {
            listener.beforeStart();
        }
        doStart();
        lifecycle.moveToStarted();
        for (LifecycleListener listener : listeners) {
            listener.afterStart();
        synchronized (lifecycle) {
            if (!lifecycle.canMoveToStarted()) {
                return;
            }
            for (LifecycleListener listener : listeners) {
                listener.beforeStart();
            }
            doStart();
            lifecycle.moveToStarted();
            for (LifecycleListener listener : listeners) {
                listener.afterStart();
            }
        }
    }

@@ -69,16 +68,18 @@ public abstract class AbstractLifecycleComponent implements LifecycleComponent {

    @Override
    public void stop() {
        if (!lifecycle.canMoveToStopped()) {
            return;
        }
        for (LifecycleListener listener : listeners) {
            listener.beforeStop();
        }
        lifecycle.moveToStopped();
        doStop();
        for (LifecycleListener listener : listeners) {
            listener.afterStop();
        synchronized (lifecycle) {
            if (!lifecycle.canMoveToStopped()) {
                return;
            }
            for (LifecycleListener listener : listeners) {
                listener.beforeStop();
            }
            lifecycle.moveToStopped();
            doStop();
            for (LifecycleListener listener : listeners) {
                listener.afterStop();
            }
        }
    }

@@ -86,25 +87,26 @@ public abstract class AbstractLifecycleComponent implements LifecycleComponent {

    @Override
    public void close() {
        if (lifecycle.started()) {
            stop();
        }
        if (!lifecycle.canMoveToClosed()) {
            return;
        }
        for (LifecycleListener listener : listeners) {
            listener.beforeClose();
        }
        lifecycle.moveToClosed();
        try {
            doClose();
        } catch (IOException e) {
            // TODO: we need to separate out closing (ie shutting down) services, vs releasing runtime transient
            // structures. Shutting down services should use IOUtils.close
            logger.warn("failed to close " + getClass().getName(), e);
        }
        for (LifecycleListener listener : listeners) {
            listener.afterClose();
        synchronized (lifecycle) {
            if (lifecycle.started()) {
                stop();
            }
            if (!lifecycle.canMoveToClosed()) {
                return;
            }
            for (LifecycleListener listener : listeners) {
                listener.beforeClose();
            }
            lifecycle.moveToClosed();
            try {
                doClose();
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            } finally {
                for (LifecycleListener listener : listeners) {
                    listener.afterClose();
                }
            }
        }
    }

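The synchronized (lifecycle) blocks introduced above exist so that the can-move check and the matching transition happen atomically: without them, two threads could both observe canMoveToStarted() as true and both run doStart(). A minimal, self-contained sketch of that check-then-act pattern, using an invented class rather than the ES types:

public class StartOnce {
    private boolean started = false;

    public void start() {
        synchronized (this) {
            if (started) {
                return;        // the check...
            }
            started = true;    // ...and the transition happen atomically with respect to other callers
            // the doStart() equivalent would run here, still under the lock
        }
    }
}
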
@@ -39,15 +39,22 @@ package org.elasticsearch.common.component;
 * }
 * </pre>
 * <p>
 * NOTE: The Lifecycle class is thread-safe. It is also possible to prevent concurrent state transitions
 * by locking on the Lifecycle object itself. This is typically useful when chaining multiple transitions.
 * <p>
 * Note, closed is only allowed to be called when stopped, so make sure to stop the component first.
 * Here is how the logic can be applied:
 * Here is how the logic can be applied. A lock of the {@code lifecycleState} object is taken so that
 * another thread cannot move the state from {@code STOPPED} to {@code STARTED} before it has moved to
 * {@code CLOSED}.
 * <pre>
 * public void close() {
 *     if (lifecycleState.started()) {
 *         stop();
 *     }
 *     if (!lifecycleState.moveToClosed()) {
 *         return;
 *     synchronized (lifecycleState) {
 *         if (lifecycleState.started()) {
 *             stop();
 *         }
 *         if (!lifecycleState.moveToClosed()) {
 *             return;
 *         }
 *     }
 *     // perform close logic here
 * }

@@ -116,7 +123,7 @@ public class Lifecycle {
    }


    public boolean moveToStarted() throws IllegalStateException {
    public synchronized boolean moveToStarted() throws IllegalStateException {
        State localState = this.state;
        if (localState == State.INITIALIZED || localState == State.STOPPED) {
            state = State.STARTED;

@@ -145,7 +152,7 @@ public class Lifecycle {
        throw new IllegalStateException("Can't move to stopped with unknown state");
    }

    public boolean moveToStopped() throws IllegalStateException {
    public synchronized boolean moveToStopped() throws IllegalStateException {
        State localState = state;
        if (localState == State.STARTED) {
            state = State.STOPPED;

@@ -171,7 +178,7 @@ public class Lifecycle {
        return true;
    }

    public boolean moveToClosed() throws IllegalStateException {
    public synchronized boolean moveToClosed() throws IllegalStateException {
        State localState = state;
        if (localState == State.CLOSED) {
            return false;

@@ -406,7 +406,7 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.
     * @param edges a list of edges to which all edges of the component will be added (could be <code>null</code>)
     * @return number of edges that belong to this component
     */
    private static int component(final Edge edge, final int id, final ArrayList<Edge> edges) {
    private static int component(final Edge edge, final int id, final ArrayList<Edge> edges, double[] partitionPoint) {
        // find a coordinate that is not part of the dateline
        Edge any = edge;
        while(any.coordinate.x == +DATELINE || any.coordinate.x == -DATELINE) {

@@ -438,6 +438,9 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.
            if (edges != null) {
                // found a closed loop - we have two connected components so we need to slice into two distinct components
                if (visitedEdge.containsKey(current.coordinate)) {
                    partitionPoint[0] = current.coordinate.x;
                    partitionPoint[1] = current.coordinate.y;
                    partitionPoint[2] = current.coordinate.z;
                    if (connectedComponents > 0 && current.next != edge) {
                        throw new InvalidShapeException("Shape contains more than one shared point");
                    }

@@ -479,10 +482,20 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.
     * @param coordinates Array of coordinates to write the result to
     * @return the coordinates parameter
     */
    private static Coordinate[] coordinates(Edge component, Coordinate[] coordinates) {
    private static Coordinate[] coordinates(Edge component, Coordinate[] coordinates, double[] partitionPoint) {
        for (int i = 0; i < coordinates.length; i++) {
            coordinates[i] = (component = component.next).coordinate;
        }
        // First and last coordinates must be equal
        if (coordinates[0].equals(coordinates[coordinates.length - 1]) == false) {
            if (partitionPoint[2] == Double.NaN) {
                throw new InvalidShapeException("Self-intersection at or near point ["
                    + partitionPoint[0] + "," + partitionPoint[1] + "]");
            } else {
                throw new InvalidShapeException("Self-intersection at or near point ["
                    + partitionPoint[0] + "," + partitionPoint[1] + "," + partitionPoint[2] + "]");
            }
        }
        return coordinates;
    }

@@ -512,8 +525,9 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.
        final Coordinate[][] points = new Coordinate[numHoles][];

        for (int i = 0; i < numHoles; i++) {
            int length = component(holes[i], -(i+1), null); // mark as visited by inverting the sign
            points[i] = coordinates(holes[i], new Coordinate[length+1]);
            double[] partitionPoint = new double[3];
            int length = component(holes[i], -(i+1), null, partitionPoint); // mark as visited by inverting the sign
            points[i] = coordinates(holes[i], new Coordinate[length+1], partitionPoint);
        }

        return points;

@@ -524,9 +538,10 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.

        for (int i = 0; i < edges.length; i++) {
            if (edges[i].component >= 0) {
                int length = component(edges[i], -(components.size()+numHoles+1), mainEdges);
                double[] partitionPoint = new double[3];
                int length = component(edges[i], -(components.size()+numHoles+1), mainEdges, partitionPoint);
                List<Coordinate[]> component = new ArrayList<>();
                component.add(coordinates(edges[i], new Coordinate[length+1]));
                component.add(coordinates(edges[i], new Coordinate[length+1], partitionPoint));
                components.add(component);
            }
        }

@@ -36,6 +36,8 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.regex.Pattern;

@@ -52,7 +54,11 @@ public class Queries {


    public static Query newUnmappedFieldQuery(String field) {
        return Queries.newMatchNoDocsQuery("unmapped field [" + (field != null ? field : "null") + "]");
        return newUnmappedFieldsQuery(Collections.singletonList(field));
    }

    public static Query newUnmappedFieldsQuery(Collection<String> fields) {
        return Queries.newMatchNoDocsQuery("unmapped fields " + fields);
    }

    public static Query newLenientFieldQuery(String field, RuntimeException e) {

Some files were not shown because too many files have changed in this diff.