Merge branch 'master' into ccr

Nhat Nguyen 2018-08-30 23:09:01 -04:00
commit d3f32273eb
153 changed files with 4317 additions and 1625 deletions

View File

@@ -17,12 +17,22 @@
* under the License.
*/
import java.nio.file.Files
import org.gradle.util.GradleVersion
plugins {
id 'java-gradle-plugin'
id 'groovy'
}
gradlePlugin {
plugins {
simplePlugin {
id = 'elasticsearch.clusterformation'
implementationClass = 'org.elasticsearch.gradle.clusterformation.ClusterformationPlugin'
}
}
}
group = 'org.elasticsearch.gradle'
String minimumGradleVersion = file('src/main/resources/minimumGradleVersion').text.trim()
@@ -166,7 +176,6 @@ if (project != rootProject) {
it.tasks.matching { it.name == 'publishNebulaPublicationToLocalTestRepository'}
}
exclude "**/*Tests.class"
include "**/*IT.class"
testClassesDirs = sourceSets.test.output.classesDirs
classpath = sourceSets.test.runtimeClasspath
inputs.dir(file("src/testKit"))

View File

@@ -22,8 +22,8 @@ package org.elasticsearch.gradle.precommit
import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin
import org.elasticsearch.gradle.LoggedExec
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Classpath
import org.gradle.api.tasks.OutputFile
/**
* Runs CheckJarHell on a classpath.
*/
@@ -35,9 +35,13 @@ public class JarHellTask extends LoggedExec {
* inputs (i.e. the jars/class files).
*/
@OutputFile
File successMarker = new File(project.buildDir, 'markers/jarHell')
File successMarker
@Classpath
FileCollection classpath
public JarHellTask() {
successMarker = new File(project.buildDir, 'markers/jarHell-' + getName())
project.afterEvaluate {
FileCollection classpath = project.sourceSets.test.runtimeClasspath
if (project.plugins.hasPlugin(ShadowPlugin)) {

View File

@@ -31,7 +31,7 @@ class PrecommitTasks {
/** Adds a precommit task, which depends on non-test verification tasks. */
public static Task create(Project project, boolean includeDependencyLicenses) {
Configuration forbiddenApisConfiguration = project.configurations.create("forbiddenApisCliJar")
project.configurations.create("forbiddenApisCliJar")
project.dependencies {
forbiddenApisCliJar ('de.thetaphi:forbiddenapis:2.5')
}
@@ -43,7 +43,7 @@ class PrecommitTasks {
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
project.tasks.create('filepermissions', FilePermissionsTask.class),
project.tasks.create('jarHell', JarHellTask.class),
configureJarHell(project),
configureThirdPartyAudit(project)
]
@@ -80,6 +80,12 @@ class PrecommitTasks {
return project.tasks.create(precommitOptions)
}
private static Task configureJarHell(Project project) {
Task task = project.tasks.create('jarHell', JarHellTask.class)
task.classpath = project.sourceSets.test.runtimeClasspath
return task
}
private static Task configureThirdPartyAudit(Project project) {
ThirdPartyAuditTask thirdPartyAuditTask = project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)
ExportElasticsearchBuildResourcesTask buildResources = project.tasks.getByName('buildResources')

View File

@@ -0,0 +1,68 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch;
import org.gradle.api.Action;
import org.gradle.api.Project;
import org.gradle.api.file.CopySpec;
import org.gradle.api.file.FileTree;
import org.gradle.api.tasks.WorkResult;
import org.gradle.process.ExecResult;
import org.gradle.process.JavaExecSpec;
import java.io.File;
/**
* Facilitate access to Gradle services without a direct dependency on Project.
*
* In a future release Gradle will offer service injection; this adapter plays that role until then.
* It exposes the service methods that are part of the public API, as the classes implementing them are not.
* Today service injection is <a href="https://github.com/gradle/gradle/issues/2363">not available</a> for
* extensions.
*
* Everything exposed here must be thread-safe. That is the very reason why the project is not passed in directly.
*/
public class GradleServicesAdapter {
public final Project project;
public GradleServicesAdapter(Project project) {
this.project = project;
}
public static GradleServicesAdapter getInstance(Project project) {
return new GradleServicesAdapter(project);
}
public WorkResult copy(Action<? super CopySpec> action) {
return project.copy(action);
}
public WorkResult sync(Action<? super CopySpec> action) {
return project.sync(action);
}
public ExecResult javaexec(Action<? super JavaExecSpec> action) {
return project.javaexec(action);
}
public FileTree zipTree(File zipPath) {
return project.zipTree(zipPath);
}
}
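A minimal sketch of a consumer, assuming a hypothetical DistributionSetup class (not part of this commit): an extension holds the adapter instead of the Project itself and unpacks an archive through the copy and zipTree services.

import org.elasticsearch.GradleServicesAdapter;

import java.io.File;

// Hypothetical consumer: depends on the adapter, never on Project directly.
public class DistributionSetup {

    private final GradleServicesAdapter services;

    public DistributionSetup(GradleServicesAdapter services) {
        this.services = services;
    }

    // Unpacks a zip into a target directory using only the adapter's services.
    public void extract(File zip, File targetDir) {
        services.copy(spec -> {
            spec.from(services.zipTree(zip));
            spec.into(targetDir);
        });
    }
}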

View File

@@ -0,0 +1,36 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle;
public enum Distribution {
INTEG_TEST("integ-test-zip"),
ZIP("zip"),
ZIP_OSS("zip-oss");
private final String name;
Distribution(String name) {
this.name = name;
}
public String getName() {
return name;
}
}
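As a quick, hedged illustration (not part of this commit), getName() yields the artifact name that the build uses to pick a distribution:

import org.elasticsearch.gradle.Distribution;

public class DistributionSketch {
    public static void main(String[] args) {
        System.out.println(Distribution.ZIP.getName());     // prints "zip"
        System.out.println(Distribution.ZIP_OSS.getName()); // prints "zip-oss"
    }
}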

View File

@@ -0,0 +1,110 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.clusterformation;
import groovy.lang.Closure;
import org.elasticsearch.GradleServicesAdapter;
import org.gradle.api.NamedDomainObjectContainer;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.execution.TaskActionListener;
import org.gradle.api.execution.TaskExecutionListener;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.plugins.ExtraPropertiesExtension;
import org.gradle.api.tasks.TaskState;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class ClusterformationPlugin implements Plugin<Project> {
public static final String LIST_TASK_NAME = "listElasticSearchClusters";
public static final String EXTENSION_NAME = "elasticSearchClusters";
private final Logger logger = Logging.getLogger(ClusterformationPlugin.class);
@Override
public void apply(Project project) {
NamedDomainObjectContainer<? extends ElasticsearchConfiguration> container = project.container(
ElasticsearchNode.class,
(name) -> new ElasticsearchNode(name, GradleServicesAdapter.getInstance(project))
);
project.getExtensions().add(EXTENSION_NAME, container);
Task listTask = project.getTasks().create(LIST_TASK_NAME);
listTask.setGroup("ES cluster formation");
listTask.setDescription("Lists all ES clusters configured for this project");
listTask.doLast((Task task) ->
container.forEach((ElasticsearchConfiguration cluster) ->
logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getDistribution())
)
);
Map<Task, List<ElasticsearchConfiguration>> taskToCluster = new HashMap<>();
// register an extension for all current and future tasks, so that any task can declare that it wants to use a
// specific cluster.
project.getTasks().all((Task task) ->
task.getExtensions().findByType(ExtraPropertiesExtension.class)
.set(
"useCluster",
new Closure<Void>(this, this) {
public void doCall(ElasticsearchConfiguration conf) {
taskToCluster.computeIfAbsent(task, k -> new ArrayList<>()).add(conf);
}
})
);
project.getGradle().getTaskGraph().whenReady(taskExecutionGraph ->
taskExecutionGraph.getAllTasks()
.forEach(task ->
taskToCluster.getOrDefault(task, Collections.emptyList()).forEach(ElasticsearchConfiguration::claim)
)
);
project.getGradle().addListener(
new TaskActionListener() {
@Override
public void beforeActions(Task task) {
// we only start the cluster before the actions, so we'll not start it if the task is up-to-date
taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::start);
}
@Override
public void afterActions(Task task) {}
}
);
project.getGradle().addListener(
new TaskExecutionListener() {
@Override
public void afterExecute(Task task, TaskState state) {
// always un-claim the cluster, even if _this_ task is up-to-date, as others might not have been and caused the
// cluster to start.
taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchConfiguration::unClaimAndStop);
}
@Override
public void beforeExecute(Task task) {}
}
);
}
}

View File

@@ -0,0 +1,46 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.clusterformation;
import org.elasticsearch.gradle.Distribution;
import org.elasticsearch.gradle.Version;
import java.util.concurrent.Future;
public interface ElasticsearchConfiguration {
String getName();
Version getVersion();
void setVersion(Version version);
default void setVersion(String version) {
setVersion(Version.fromString(version));
}
Distribution getDistribution();
void setDistribution(Distribution distribution);
void claim();
Future<Void> start();
void unClaimAndStop();
}

View File

@@ -0,0 +1,130 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.clusterformation;
import org.elasticsearch.GradleServicesAdapter;
import org.elasticsearch.gradle.Distribution;
import org.elasticsearch.gradle.Version;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import java.util.Objects;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
public class ElasticsearchNode implements ElasticsearchConfiguration {
private final String name;
private final GradleServicesAdapter services;
private final AtomicInteger noOfClaims = new AtomicInteger();
private final AtomicBoolean started = new AtomicBoolean(false);
private final Logger logger = Logging.getLogger(ElasticsearchNode.class);
private Distribution distribution;
private Version version;
public ElasticsearchNode(String name, GradleServicesAdapter services) {
this.name = name;
this.services = services;
}
@Override
public String getName() {
return name;
}
@Override
public Version getVersion() {
return version;
}
@Override
public void setVersion(Version version) {
checkNotRunning();
this.version = version;
}
@Override
public Distribution getDistribution() {
return distribution;
}
@Override
public void setDistribution(Distribution distribution) {
checkNotRunning();
this.distribution = distribution;
}
@Override
public void claim() {
noOfClaims.incrementAndGet();
}
/**
* Start the cluster if not running. Does nothing if the cluster is already running.
*
* @return future of thread running in the background
*/
@Override
public Future<Void> start() {
if (started.getAndSet(true)) {
logger.lifecycle("Already started cluster: {}", name);
} else {
logger.lifecycle("Starting cluster: {}", name);
}
return null;
}
/**
* Stops a running cluster if it's not claimed. Does nothing otherwise.
*/
@Override
public void unClaimAndStop() {
int decrementedClaims = noOfClaims.decrementAndGet();
if (decrementedClaims > 0) {
logger.lifecycle("Not stopping {}, since cluster still has {} claim(s)", name, decrementedClaims);
return;
}
if (started.get() == false) {
logger.lifecycle("Asked to unClaimAndStop, but cluster was not running: {}", name);
return;
}
logger.lifecycle("Stopping {}, number of claims is {}", name, decrementedClaims);
}
private void checkNotRunning() {
if (started.get()) {
throw new IllegalStateException("Configuration cannot be altered while running");
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ElasticsearchNode that = (ElasticsearchNode) o;
return Objects.equals(name, that.name);
}
@Override
public int hashCode() {
return Objects.hash(name);
}
}
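The claim counter above is the heart of the plugin: a node only stops once the last claim is released. A standalone sketch of the lifecycle (not part of this commit; null stands in for the GradleServicesAdapter, which the logging-only stub methods never touch):

import org.elasticsearch.gradle.clusterformation.ElasticsearchConfiguration;
import org.elasticsearch.gradle.clusterformation.ElasticsearchNode;

public class LifecycleSketch {
    public static void main(String[] args) {
        ElasticsearchConfiguration cluster = new ElasticsearchNode("myTestCluster", null);
        cluster.claim();          // task A registers interest
        cluster.claim();          // task B registers interest
        cluster.start();          // logs "Starting cluster: myTestCluster"
        cluster.unClaimAndStop(); // one claim left: logs "Not stopping ..."
        cluster.unClaimAndStop(); // zero claims left: logs "Stopping ..."
    }
}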

View File

@@ -1 +1 @@
4.9
4.10

View File

@@ -153,17 +153,4 @@ public class BuildExamplePluginsIT extends GradleIntegrationTestCase {
}
}
private String getLocalTestRepoPath() {
String property = System.getProperty("test.local-test-repo-path");
Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests");
File file = new File(property);
assertTrue("Expected " + property + " to exist, but it did not!", file.exists());
if (File.separator.equals("\\")) {
// Use / on Windows too, the build script is not happy with \
return file.getAbsolutePath().replace(File.separator, "/");
} else {
return file.getAbsolutePath();
}
}
}

View File

@@ -40,7 +40,7 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe
.withArguments("buildResources", "-s", "-i")
.withPluginClasspath()
.build();
assertTaskSuccessfull(result, ":buildResources");
assertTaskSuccessful(result, ":buildResources");
assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml");
assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml");
@@ -61,8 +61,8 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe
.withPluginClasspath()
.build();
assertTaskSuccessfull(result, ":buildResources");
assertTaskSuccessfull(result, ":sampleCopyAll");
assertTaskSuccessful(result, ":buildResources");
assertTaskSuccessful(result, ":sampleCopyAll");
assertBuildFileExists(result, PROJECT_NAME, "sampleCopyAll/checkstyle.xml");
// This is a side effect of compile time reference
assertBuildFileExists(result, PROJECT_NAME, "sampleCopyAll/checkstyle_suppressions.xml");
@@ -75,7 +75,7 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe
.withPluginClasspath()
.build();
assertTaskSuccessfull(result, ":sample");
assertTaskSuccessful(result, ":sample");
assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml");
assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml");
}

View File

@@ -0,0 +1,144 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.clusterformation;
import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
import org.gradle.testkit.runner.BuildResult;
import org.gradle.testkit.runner.GradleRunner;
import org.gradle.testkit.runner.TaskOutcome;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
public class ClusterformationPluginIT extends GradleIntegrationTestCase {
public void testListClusters() {
BuildResult result = GradleRunner.create()
.withProjectDir(getProjectDir("clusterformation"))
.withArguments("listElasticSearchClusters", "-s")
.withPluginClasspath()
.build();
assertEquals(TaskOutcome.SUCCESS, result.task(":listElasticSearchClusters").getOutcome());
assertOutputContains(
result.getOutput(),
" * myTestCluster:"
);
}
public void testUseClusterByOne() {
BuildResult result = GradleRunner.create()
.withProjectDir(getProjectDir("clusterformation"))
.withArguments("user1", "-s")
.withPluginClasspath()
.build();
assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome());
assertOutputContains(
result.getOutput(),
"Starting cluster: myTestCluster",
"Stopping myTestCluster, number of claims is 0"
);
}
public void testUseClusterByOneWithDryRun() {
BuildResult result = GradleRunner.create()
.withProjectDir(getProjectDir("clusterformation"))
.withArguments("user1", "-s", "--dry-run")
.withPluginClasspath()
.build();
assertNull(result.task(":user1"));
assertOutputDoesNotContain(
result.getOutput(),
"Starting cluster: myTestCluster",
"Stopping myTestCluster, number of claims is 0"
);
}
public void testUseClusterByTwo() {
BuildResult result = GradleRunner.create()
.withProjectDir(getProjectDir("clusterformation"))
.withArguments("user1", "user2", "-s")
.withPluginClasspath()
.build();
assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome());
assertEquals(TaskOutcome.SUCCESS, result.task(":user2").getOutcome());
assertOutputContains(
result.getOutput(),
"Starting cluster: myTestCluster",
"Not stopping myTestCluster, since cluster still has 1 claim(s)",
"Stopping myTestCluster, number of claims is 0"
);
}
public void testUseClusterByUpToDateTask() {
BuildResult result = GradleRunner.create()
.withProjectDir(getProjectDir("clusterformation"))
.withArguments("upToDate1", "upToDate2", "-s")
.withPluginClasspath()
.build();
assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate1").getOutcome());
assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate2").getOutcome());
assertOutputContains(
result.getOutput(),
"Not stopping myTestCluster, since cluster still has 1 claim(s)",
"cluster was not running: myTestCluster"
);
assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster");
}
public void testUseClusterBySkippedTask() {
BuildResult result = GradleRunner.create()
.withProjectDir(getProjectDir("clusterformation"))
.withArguments("skipped1", "skipped2", "-s")
.withPluginClasspath()
.build();
assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome());
assertEquals(TaskOutcome.SKIPPED, result.task(":skipped2").getOutcome());
assertOutputContains(
result.getOutput(),
"Not stopping myTestCluster, since cluster still has 1 claim(s)",
"cluster was not running: myTestCluster"
);
assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster");
}
public void testUseClusterBySkippedAndWorkingTask() {
BuildResult result = GradleRunner.create()
.withProjectDir(getProjectDir("clusterformation"))
.withArguments("skipped1", "user1", "-s")
.withPluginClasspath()
.build();
assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome());
assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome());
assertOutputContains(
result.getOutput(),
"> Task :user1",
"Starting cluster: myTestCluster",
"Stopping myTestCluster, number of claims is 0"
);
}
}

View File

@@ -0,0 +1,42 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit;
import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
import org.gradle.testkit.runner.BuildResult;
import org.gradle.testkit.runner.GradleRunner;
public class JarHellTaskIT extends GradleIntegrationTestCase {
public void testJarHellDetected() {
BuildResult result = GradleRunner.create()
.withProjectDir(getProjectDir("jarHell"))
.withArguments("clean", "precommit", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath())
.withPluginClasspath()
.buildAndFail();
assertTaskFailed(result, ":jarHell");
assertOutputContains(
result.getOutput(),
"Exception in thread \"main\" java.lang.IllegalStateException: jar hell!",
"class: org.apache.logging.log4j.Logger"
);
}
}

View File

@@ -9,6 +9,7 @@ import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -66,15 +67,24 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase {
}
}
protected void assertTaskSuccessfull(BuildResult result, String taskName) {
protected void assertTaskFailed(BuildResult result, String taskName) {
assertTaskOutcome(result, taskName, TaskOutcome.FAILED);
}
protected void assertTaskSuccessful(BuildResult result, String taskName) {
assertTaskOutcome(result, taskName, TaskOutcome.SUCCESS);
}
private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome taskOutcome) {
BuildTask task = result.task(taskName);
if (task == null) {
fail("Expected task `" + taskName + "` to be successful, but it did not run");
fail("Expected task `" + taskName + "` to be " + taskOutcome +", but it did not run" +
"\n\nOutput is:\n" + result.getOutput());
}
assertEquals(
"Expected task to be successful but it was: " + task.getOutcome() +
"\n\nOutput is:\n" + result.getOutput() ,
TaskOutcome.SUCCESS,
taskOutcome + "\n\nOutput is:\n" + result.getOutput() ,
taskOutcome,
task.getOutcome()
);
}
@@ -109,4 +119,17 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase {
Files.exists(absPath)
);
}
protected String getLocalTestRepoPath() {
String property = System.getProperty("test.local-test-repo-path");
Objects.requireNonNull(property, "test.local-test-repo-path not passed to tests");
File file = new File(property);
assertTrue("Expected " + property + " to exist, but it did not!", file.exists());
if (File.separator.equals("\\")) {
// Use / on Windows too, the build script is not happy with \
return file.getAbsolutePath().replace(File.separator, "/");
} else {
return file.getAbsolutePath();
}
}
}

View File

@@ -0,0 +1,41 @@
plugins {
id 'elasticsearch.clusterformation'
}
elasticSearchClusters {
myTestCluster {
distribution = 'ZIP'
}
}
task user1 {
useCluster elasticSearchClusters.myTestCluster
doLast {
println "user1 executing"
}
}
task user2 {
useCluster elasticSearchClusters.myTestCluster
doLast {
println "user2 executing"
}
}
task upToDate1 {
useCluster elasticSearchClusters.myTestCluster
}
task upToDate2 {
useCluster elasticSearchClusters.myTestCluster
}
task skipped1 {
enabled = false
useCluster elasticSearchClusters.myTestCluster
}
task skipped2 {
enabled = false
useCluster elasticSearchClusters.myTestCluster
}

View File

@@ -0,0 +1,29 @@
plugins {
id 'java'
id 'elasticsearch.build'
}
dependencyLicenses.enabled = false
dependenciesInfo.enabled = false
forbiddenApisMain.enabled = false
forbiddenApisTest.enabled = false
thirdPartyAudit.enabled = false
namingConventions.enabled = false
ext.licenseFile = file("$buildDir/dummy/license")
ext.noticeFile = file("$buildDir/dummy/notice")
repositories {
mavenCentral()
maven {
url System.getProperty("local.repo.path")
}
}
dependencies {
// Needed for the JarHell task
testCompile ("org.elasticsearch.test:framework:${versions.elasticsearch}")
// causes jar hell with local sources
compile "org.apache.logging.log4j:log4j-api:${versions.log4j}"
}

View File

@@ -0,0 +1,7 @@
package org.apache.logging.log4j;
// Jar Hell !
public class Logger {
}

View File

@@ -88,6 +88,7 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.security.RefreshPolicy;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
@@ -1353,11 +1354,16 @@
Params withRefresh(boolean refresh) {
if (refresh) {
return withRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
return withRefreshPolicy(RefreshPolicy.IMMEDIATE);
}
return this;
}
/**
* @deprecated If creating a new HLRC ReST API call, use {@link RefreshPolicy}
* instead of {@link WriteRequest.RefreshPolicy} from the server project
*/
@Deprecated
Params withRefreshPolicy(WriteRequest.RefreshPolicy refreshPolicy) {
if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
return putParam("refresh", refreshPolicy.getValue());
@@ -1365,6 +1371,13 @@
return this;
}
Params withRefreshPolicy(RefreshPolicy refreshPolicy) {
if (refreshPolicy != RefreshPolicy.NONE) {
return putParam("refresh", refreshPolicy.getValue());
}
return this;
}
Params withRetryOnConflict(int retryOnConflict) {
if (retryOnConflict > 0) {
return putParam("retry_on_conflict", String.valueOf(retryOnConflict));

View File

@@ -0,0 +1,59 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.security;
/**
* Enumeration of values that control the refresh policy for a request that
* supports specifying a refresh policy.
*/
public enum RefreshPolicy {
/**
* Don't refresh after this request. The default.
*/
NONE("false"),
/**
* Force a refresh as part of this request. This refresh policy does not scale for high indexing or search throughput but is useful
* to present a consistent view for indices with very low traffic. And it is wonderful for tests!
*/
IMMEDIATE("true"),
/**
* Leave this request open until a refresh has made the contents of this request visible to search. This refresh policy is
* compatible with high indexing and search throughput but it causes the request to wait to reply until a refresh occurs.
*/
WAIT_UNTIL("wait_for");
private final String value;
RefreshPolicy(String value) {
this.value = value;
}
public String getValue() {
return value;
}
/**
* Get the default refresh policy, which is <code>NONE</code>
*/
public static RefreshPolicy getDefault() {
return RefreshPolicy.NONE;
}
}
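The getValue() strings are the wire values of the refresh query parameter, which is exactly how the new withRefreshPolicy(RefreshPolicy) converter above uses them. A small sketch (the helper and the params map are illustrative, not HLRC API):

import org.elasticsearch.client.security.RefreshPolicy;

import java.util.HashMap;
import java.util.Map;

public class RefreshParamSketch {

    // Mirrors the converter: NONE adds nothing, any other policy
    // sets the `refresh` query parameter to its wire value.
    static void addRefreshParam(Map<String, String> params, RefreshPolicy policy) {
        if (policy != RefreshPolicy.NONE) {
            params.put("refresh", policy.getValue());
        }
    }

    public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        addRefreshParam(params, RefreshPolicy.WAIT_UNTIL);
        System.out.println(params); // prints {refresh=wait_for}
    }
}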

View File

@@ -1,71 +1,14 @@
[[discovery-file]]
=== File-Based Discovery Plugin
The file-based discovery plugin uses a list of hosts/ports in a `unicast_hosts.txt` file
in the `config/discovery-file` directory for unicast discovery.
The functionality provided by the `discovery-file` plugin is now available in
Elasticsearch without requiring a plugin. This plugin still exists to ensure
backwards compatibility, but it will be removed in a future version.
On installation, this plugin creates a file at
`$ES_PATH_CONF/discovery-file/unicast_hosts.txt` containing comments that
describe how to use it. It is preferable not to install this plugin and instead
to create this file, and its containing directory, using standard tools.
:plugin_name: discovery-file
include::install_remove.asciidoc[]
[[discovery-file-usage]]
[float]
==== Using the file-based discovery plugin
The file-based discovery plugin provides the ability to specify the
unicast hosts list through a simple `unicast_hosts.txt` file that can
be dynamically updated at any time. To enable, add the following in `elasticsearch.yml`:
[source,yaml]
----
discovery.zen.hosts_provider: file
----
This plugin simply provides a facility to supply the unicast hosts list for
zen discovery through an external file that can be updated at any time by a side process.
For example, this gives a convenient mechanism for an Elasticsearch instance
that is run in docker containers to be dynamically supplied a list of IP
addresses to connect to for zen discovery when those IP addresses may not be
known at node startup.
Note that the file-based discovery plugin is meant to augment the unicast
hosts list in `elasticsearch.yml` (if specified), not replace it. Therefore,
if there are valid unicast host entries in `discovery.zen.ping.unicast.hosts`,
they will be used in addition to those supplied in `unicast_hosts.txt`.
Anytime a change is made to the `unicast_hosts.txt` file, even as Elasticsearch
continues to run, the new changes will be picked up by the plugin and the
new hosts list will be used for the next pinging round for master election.
Upon installation of the plugin, a default `unicast_hosts.txt` file will
be found in the `$CONFIG_DIR/discovery-file` directory. This default file
will contain some comments about what the file should contain. All comments
for this file must appear on their own lines, starting with `#` (i.e. comments
cannot start in the middle of a line).
[[discovery-file-format]]
[float]
==== unicast_hosts.txt file format
The format of the file is to specify one unicast host entry per line.
Each unicast host entry consists of the host (host name or IP address) and
an optional transport port number. If the port number is specified, it must
come immediately after the host (on the same line) separated by a `:`.
If the port number is not specified, a default value of 9300 is used.
For example, here is a `unicast_hosts.txt` for a cluster with
four nodes that participate in unicast discovery, some of which are not
running on the default port:
[source,txt]
----------------------------------------------------------------
10.10.10.5
10.10.10.6:9305
10.10.10.5:10005
# an IPv6 address
[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301
----------------------------------------------------------------
Host names are allowed instead of IP addresses (similar to
`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be
specified in brackets with the port coming after the brackets.

View File

@@ -10,71 +10,66 @@ include::install_remove.asciidoc[]
[[repository-gcs-usage]]
==== Getting started
The plugin uses the https://cloud.google.com/storage/docs/json_api/[Google Cloud Storage JSON API] (v1)
to connect to the Storage service. If this is the first time you use Google Cloud Storage, you first
need to connect to the https://console.cloud.google.com/[Google Cloud Platform Console] and create a new
project. Once your project is created, you must enable the Cloud Storage Service for your project.
The plugin uses the https://github.com/GoogleCloudPlatform/google-cloud-java/tree/master/google-cloud-clients/google-cloud-storage[Google Cloud Java Client for Storage]
to connect to the Storage service. If you are using
https://cloud.google.com/storage/[Google Cloud Storage] for the first time, you
must connect to the https://console.cloud.google.com/[Google Cloud Platform Console]
and create a new project. After your project is created, you must enable the
Cloud Storage Service for your project.
[[repository-gcs-creating-bucket]]
===== Creating a Bucket
Google Cloud Storage service uses the concept of https://cloud.google.com/storage/docs/key-terms[Bucket]
as a container for all the data. Buckets are usually created using the
https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin will not automatically
create buckets.
The Google Cloud Storage service uses the concept of a
https://cloud.google.com/storage/docs/key-terms[bucket] as a container for all
the data. Buckets are usually created using the
https://console.cloud.google.com/[Google Cloud Platform Console]. The plugin
does not automatically create buckets.
To create a new bucket:
1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]
2. Select your project
3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]
4. Click the "Create Bucket" button
5. Enter the name of the new bucket
6. Select a storage class
7. Select a location
8. Click the "Create" button
1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console].
2. Select your project.
3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser].
4. Click the *Create Bucket* button.
5. Enter the name of the new bucket.
6. Select a storage class.
7. Select a location.
8. Click the *Create* button.
The bucket should now be created.
For more detailed instructions, see the
https://cloud.google.com/storage/docs/quickstart-console#create_a_bucket[Google Cloud documentation].
[[repository-gcs-service-authentication]]
===== Service Authentication
The plugin supports two authentication modes:
* The built-in <<repository-gcs-using-compute-engine, Compute Engine authentication>>. This mode is
recommended if your Elasticsearch node is running on a Compute Engine virtual machine.
* Specifying <<repository-gcs-using-service-account, Service Account>> credentials.
[[repository-gcs-using-compute-engine]]
===== Using Compute Engine
When running on Compute Engine, the plugin uses Google's built-in authentication mechanism to
authenticate on the Storage service. Compute Engine virtual machines are usually associated to a
default service account. This service account can be found in the VM instance details in the
https://console.cloud.google.com/compute/[Compute Engine console].
This is the default authentication mode and requires no configuration.
NOTE: The Compute Engine VM must be allowed to use the Storage service. This can be done only at VM
creation time, when "Storage" access can be configured to "Read/Write" permission. Check your
instance details at the section "Cloud API access scopes".
The plugin must authenticate the requests it makes to the Google Cloud Storage
service. It is common for Google client libraries to employ a strategy named https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application[application default credentials].
However, that strategy is **not** supported for use with Elasticsearch. The
plugin operates under the Elasticsearch process, which runs with the security
manager enabled. The security manager obstructs the "automatic" credential discovery.
Therefore, you must configure <<repository-gcs-using-service-account,service account>>
credentials even if you are using an environment that does not normally require
this configuration (such as Compute Engine, Kubernetes Engine or App Engine).
[[repository-gcs-using-service-account]]
===== Using a Service Account
If your Elasticsearch node is not running on Compute Engine, or if you don't want to use Google's
built-in authentication mechanism, you can authenticate on the Storage service using a
https://cloud.google.com/iam/docs/overview#service_account[Service Account] file.
You have to obtain and provide https://cloud.google.com/iam/docs/overview#service_account[service account credentials]
manually.
To create a service account file:
For detailed information about generating JSON service account files, see the https://cloud.google.com/storage/docs/authentication?hl=en#service_accounts[Google Cloud documentation].
Note that the PKCS12 format is not supported by this plugin.
1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]
2. Select your project
3. Go to the https://console.cloud.google.com/permissions[Permission] tab
4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab
5. Click on "Create service account"
6. Once created, select the new service account and download a JSON key file
Here is a summary of the steps:
A service account file looks like this:
1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console].
2. Select your project.
3. Go to the https://console.cloud.google.com/permissions[Permission] tab.
4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab.
5. Click *Create service account*.
6. After the account is created, select it and download a JSON key file.
A JSON service account file looks like this:
[source,js]
----
@@ -84,19 +79,26 @@ A service account file looks like this:
"private_key_id": "...",
"private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
"client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com",
"client_id": "..."
"client_id": "...",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://accounts.google.com/o/oauth2/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/your-bucket@your-project-id.iam.gserviceaccount.com"
}
----
// NOTCONSOLE
This file must be stored in the {ref}/secure-settings.html[elasticsearch keystore], under a setting name
of the form `gcs.client.NAME.credentials_file`, where `NAME` is the name of the client configuration.
The default client name is `default`, but a different client name can be specified in repository
settings using `client`.
To provide this file to the plugin, it must be stored in the {ref}/secure-settings.html[Elasticsearch keystore]. You must add a setting name of the form `gcs.client.NAME.credentials_file`, where `NAME`
is the name of the client configuration for the repository. The implicit client
name is `default`, but a different client name can be specified in the
repository settings with the `client` key.
For example, if specifying the credentials file in the keystore under
`gcs.client.my_alternate_client.credentials_file`, you can configure a repository to use these
credentials like this:
NOTE: Passing the file path via the GOOGLE_APPLICATION_CREDENTIALS environment
variable is **not** supported.
For example, if you added a `gcs.client.my_alternate_client.credentials_file`
setting in the keystore, you can configure a repository to use those credentials
like this:
[source,js]
----
@@ -113,19 +115,18 @@ PUT _snapshot/my_gcs_repository
// TEST[skip:we don't have gcs setup while testing this]
The `credentials_file` settings are {ref}/secure-settings.html#reloadable-secure-settings[reloadable].
After you reload the settings, the internal `gcs` clients, used to transfer the
snapshot contents, will utilize the latest settings from the keystore.
After you reload the settings, the internal `gcs` clients, which are used to
transfer the snapshot contents, utilize the latest settings from the keystore.
NOTE: In progress snapshot/restore jobs will not be preempted by a *reload*
of the client's `credentials_file` settings. They will complete using the client
as it was built when the operation started.
NOTE: Snapshot or restore jobs that are in progress are not preempted by a *reload*
of the client's `credentials_file` settings. They complete using the client as
it was built when the operation started.
[[repository-gcs-client]]
==== Client Settings
The client used to connect to Google Cloud Storage has a number of settings available.
Client setting names are of the form `gcs.client.CLIENT_NAME.SETTING_NAME` and specified
Client setting names are of the form `gcs.client.CLIENT_NAME.SETTING_NAME` and are specified
inside `elasticsearch.yml`. The default client name looked up by a `gcs` repository is
called `default`, but can be customized with the repository setting `client`.
@@ -146,7 +147,7 @@ PUT _snapshot/my_gcs_repository
// TEST[skip:we don't have gcs setup while testing this]
Some settings are sensitive and must be stored in the
{ref}/secure-settings.html[elasticsearch keystore]. This is the case for the service account file:
{ref}/secure-settings.html[Elasticsearch keystore]. This is the case for the service account file:
[source,sh]
----
@@ -185,7 +186,7 @@ are marked as `Secure`.
`project_id`::
The Google Cloud project id. This will be automatically infered from the credentials file but
The Google Cloud project id. This will be automatically inferred from the credentials file but
can be specified explicitly. For example, it can be used to switch between projects when the
same credentials are usable for both the production and the development projects.
@@ -248,8 +249,8 @@ The following settings are supported:
The service account used to access the bucket must have "Writer" access to the bucket:
1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]
2. Select your project
3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser]
4. Select the bucket and "Edit bucket permission"
5. The service account must be configured as a "User" with "Writer" access
1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console].
2. Select your project.
3. Go to the https://console.cloud.google.com/storage/browser[Storage Browser].
4. Select the bucket and "Edit bucket permission".
5. The service account must be configured as a "User" with "Writer" access.

View File

@@ -101,8 +101,7 @@ which returns something similar to:
"translog_generation" : "2",
"max_seq_no" : "-1",
"sync_id" : "AVvFY-071siAOuFGEO9P", <1>
"max_unsafe_auto_id_timestamp" : "-1",
"min_retained_seq_no": "0"
"max_unsafe_auto_id_timestamp" : "-1"
},
"num_docs" : 0
}

View File

@@ -1,13 +1,12 @@
[[modules-discovery-zen]]
=== Zen Discovery
The zen discovery is the built in discovery module for Elasticsearch and
the default. It provides unicast discovery, but can be extended to
support cloud environments and other forms of discovery.
Zen discovery is the built-in, default discovery module for Elasticsearch. It
provides unicast and file-based discovery, and can be extended to support cloud
environments and other forms of discovery via plugins.
The zen discovery is integrated with other modules, for example, all
communication between nodes is done using the
<<modules-transport,transport>> module.
Zen discovery is integrated with other modules, for example, all communication
between nodes is done using the <<modules-transport,transport>> module.
It is separated into several sub modules, which are explained below:
@@ -15,86 +14,155 @@ It is separated into several sub modules, which are explained below:
[[ping]]
==== Ping
This is the process where a node uses the discovery mechanisms to find
other nodes.
This is the process where a node uses the discovery mechanisms to find other
nodes.
[float]
[[discovery-seed-nodes]]
==== Seed nodes
Zen discovery uses a list of _seed_ nodes in order to start off the discovery
process. At startup, or when electing a new master, Elasticsearch tries to
connect to each seed node in its list, and holds a gossip-like conversation with
them to find other nodes and to build a complete picture of the cluster. By
default there are two methods for configuring the list of seed nodes: _unicast_
and _file-based_. It is recommended that the list of seed nodes comprises the
list of master-eligible nodes in the cluster.
[float]
[[unicast]]
===== Unicast
Unicast discovery requires a list of hosts to use that will act as gossip
routers. These hosts can be specified as hostnames or IP addresses; hosts
specified as hostnames are resolved to IP addresses during each round of
pinging. Note that if you are in an environment where DNS resolutions vary with
time, you might need to adjust your <<networkaddress-cache-ttl,JVM security
settings>>.
Unicast discovery configures a static list of hosts for use as seed nodes.
These hosts can be specified as hostnames or IP addresses; hosts specified as
hostnames are resolved to IP addresses during each round of pinging. Note that
if you are in an environment where DNS resolutions vary with time, you might
need to adjust your <<networkaddress-cache-ttl,JVM security settings>>.
It is recommended that the unicast hosts list be maintained as the list of
master-eligible nodes in the cluster.
The list of hosts is set using the `discovery.zen.ping.unicast.hosts` static
setting. This is either an array of hosts or a comma-delimited string. Each
value should be in the form of `host:port` or `host` (where `port` defaults to
the setting `transport.profiles.default.port` falling back to
`transport.tcp.port` if not set). Note that IPv6 hosts must be bracketed. The
default for this setting is `127.0.0.1, [::1]`.
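For example, the following `elasticsearch.yml` snippet (the addresses are illustrative) configures three seed hosts:
[source,yaml]
----
discovery.zen.ping.unicast.hosts:
   - 10.10.10.5
   - 10.10.10.6:9305
   - "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301"
----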
Unicast discovery provides the following settings with the `discovery.zen.ping.unicast` prefix:
Additionally, the `discovery.zen.ping.unicast.resolve_timeout` configures the
amount of time to wait for DNS lookups on each round of pinging. This is
specified as a <<time-units, time unit>> and defaults to 5s.
[cols="<,<",options="header",]
|=======================================================================
|Setting |Description
|`hosts` |Either an array setting or a comma delimited setting. Each
value should be in the form of `host:port` or `host` (where `port` defaults to the setting `transport.profiles.default.port`
falling back to `transport.tcp.port` if not set). Note that IPv6 hosts must be bracketed. Defaults to `127.0.0.1, [::1]`
|`hosts.resolve_timeout` |The amount of time to wait for DNS lookups on each round of pinging. Specified as
<<time-units, time units>>. Defaults to 5s.
|=======================================================================
Unicast discovery uses the <<modules-transport,transport>> module to perform the
discovery.
The unicast discovery uses the <<modules-transport,transport>> module to perform the discovery.
[float]
[[file-based-hosts-provider]]
===== File-based
In addition to hosts provided by the static `discovery.zen.ping.unicast.hosts`
setting, it is possible to provide a list of hosts via an external file.
Elasticsearch reloads this file when it changes, so that the list of seed nodes
can change dynamically without needing to restart each node. For example, this
gives a convenient mechanism for an Elasticsearch instance that is run in a
Docker container to be dynamically supplied with a list of IP addresses to
connect to for Zen discovery when those IP addresses may not be known at node
startup.
To enable file-based discovery, configure the `file` hosts provider as follows:
[source,txt]
----------------------------------------------------------------
discovery.zen.hosts_provider: file
----------------------------------------------------------------
Then create a file at `$ES_PATH_CONF/unicast_hosts.txt` in the format described
below. Any time a change is made to the `unicast_hosts.txt` file the new
changes will be picked up by Elasticsearch and the new hosts list will be used.
Note that the file-based discovery plugin augments the unicast hosts list in
`elasticsearch.yml`: if there are valid unicast host entries in
`discovery.zen.ping.unicast.hosts` then they will be used in addition to those
supplied in `unicast_hosts.txt`.
The `discovery.zen.ping.unicast.resolve_timeout` setting also applies to DNS
lookups for nodes specified by address via file-based discovery. This is
specified as a <<time-units, time unit>> and defaults to 5s.
The format of the file is to specify one node entry per line. Each node entry
consists of the host (host name or IP address) and an optional transport port
number. If the port number is specified, it must come immediately after the
host (on the same line) separated by a `:`. If the port number is not
specified, a default value of 9300 is used.
For example, here is a `unicast_hosts.txt` for a cluster with four
nodes that participate in unicast discovery, some of which are not running on
the default port:
[source,txt]
----------------------------------------------------------------
10.10.10.5
10.10.10.6:9305
10.10.10.5:10005
# an IPv6 address
[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9301
----------------------------------------------------------------
Host names are allowed instead of IP addresses (similar to
`discovery.zen.ping.unicast.hosts`), and IPv6 addresses must be specified in
brackets with the port coming after the brackets.
It is also possible to add comments to this file. All comments must appear on
their own lines, starting with `#` (i.e. comments cannot start in the middle of a
line).
[float]
[[master-election]]
==== Master Election
As part of the ping process a master of the cluster is either
elected or joined to. This is done automatically. The
`discovery.zen.ping_timeout` (which defaults to `3s`) determines how long the node
will wait before deciding on starting an election or joining an existing cluster.
Three pings will be sent over this timeout interval. In case where no decision can be
reached after the timeout, the pinging process restarts.
In slow or congested networks, three seconds might not be enough for a node to become
aware of the other nodes in its environment before making an election decision.
Increasing the timeout should be done with care in that case, as it will slow down the
election process.
Once a node decides to join an existing formed cluster, it
will send a join request to the master (`discovery.zen.join_timeout`)
with a timeout defaulting at 20 times the ping timeout.
As part of the ping process a master of the cluster is either elected or joined
to. This is done automatically. The `discovery.zen.ping_timeout` (which defaults
to `3s`) determines how long the node will wait before deciding on starting an
election or joining an existing cluster. Three pings will be sent over this
timeout interval. If no decision can be reached after the timeout,
the pinging process restarts. In slow or congested networks, three seconds
might not be enough for a node to become aware of the other nodes in its
environment before making an election decision. Increasing the timeout should
be done with care in that case, as it will slow down the election process. Once
a node decides to join an existing formed cluster, it will send a join request
to the master (`discovery.zen.join_timeout`) with a timeout defaulting to 20
times the ping timeout.
When the master node stops or has encountered a problem, the cluster nodes
start pinging again and will elect a new master. This pinging round also
serves as a protection against (partial) network failures where a node may unjustly
think that the master has failed. In this case the node will simply hear from
other nodes about the currently active master.
When the master node stops or has encountered a problem, the cluster nodes start
pinging again and will elect a new master. This pinging round also serves as a
protection against (partial) network failures where a node may unjustly think
that the master has failed. In this case the node will simply hear from other
nodes about the currently active master.
If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from nodes that are not master
eligible (nodes where `node.master` is `false`) are ignored during master election; the default value is
If `discovery.zen.master_election.ignore_non_master_pings` is `true`, pings from
nodes that are not master eligible (nodes where `node.master` is `false`) are
ignored during master election; the default value is `false`.
Nodes can be excluded from becoming a master by setting `node.master` to
`false`.
Nodes can be excluded from becoming a master by setting `node.master` to `false`.
The `discovery.zen.minimum_master_nodes` sets the minimum
number of master eligible nodes that need to join a newly elected master in order for an election to
complete and for the elected node to accept its mastership. The same setting controls the minimum number of
active master eligible nodes that should be a part of any active cluster. If this requirement is not met the
active master node will step down and a new master election will begin.
The `discovery.zen.minimum_master_nodes` sets the minimum number of master
eligible nodes that need to join a newly elected master in order for an election
to complete and for the elected node to accept its mastership. The same setting
controls the minimum number of active master eligible nodes that should be a
part of any active cluster. If this requirement is not met the active master
node will step down and a new master election will begin.
This setting must be set to a <<minimum_master_nodes,quorum>> of your master
eligible nodes. It is recommended to avoid having only two master eligible
nodes, since a quorum of two is two. Therefore, a loss of either master
eligible node will result in an inoperable cluster.
nodes, since a quorum of two is two. Therefore, a loss of either master eligible
node will result in an inoperable cluster.
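For example, with three master-eligible nodes the quorum is `(3 / 2) + 1 = 2`, so `discovery.zen.minimum_master_nodes` should be set to `2`.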
[float]
[[fault-detection]]
==== Fault Detection
There are two fault detection processes running. The first is by the
master, to ping all the other nodes in the cluster and verify that they
are alive. And on the other end, each node pings the master to verify whether
it is still alive or an election process needs to be initiated.
There are two fault detection processes running. The first is by the master, to
ping all the other nodes in the cluster and verify that they are alive. And on
the other end, each node pings the master to verify whether it is still alive or an
election process needs to be initiated.
The following settings control the fault detection process using the
`discovery.zen.fd` prefix:
@@ -116,19 +184,21 @@ considered failed. Defaults to `3`.
The master node is the only node in a cluster that can make changes to the
cluster state. The master node processes one cluster state update at a time,
applies the required changes and publishes the updated cluster state to all the
other nodes in the cluster. Each node receives the publish message, acknowledges
it, but does *not* yet apply it. If the master does not receive acknowledgement
from at least `discovery.zen.minimum_master_nodes` nodes within a certain time
(controlled by the `discovery.zen.commit_timeout` setting and defaults to 30
seconds) the cluster state change is rejected.
Once enough nodes have responded, the cluster state is committed and a message
will be sent to all the nodes. The nodes then proceed to apply the new cluster
state to their internal state. The master node waits for all nodes to respond,
up to a timeout, before moving on to process the next updates in the queue.
The `discovery.zen.publish_timeout` is set by default to 30 seconds and is
measured from the moment the publishing started. Both timeout settings can be
changed dynamically through the <<cluster-update-settings,cluster update
settings api>>.
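
As a sketch (assuming a connected `Client` instance named `client`), both
timeouts can be adjusted on a live cluster:

[source,java]
----
// Sketch: both timeouts are dynamic and can be changed through the
// cluster update settings api.
client.admin().cluster().prepareUpdateSettings()
    .setTransientSettings(Settings.builder()
        .put("discovery.zen.commit_timeout", "15s")
        .put("discovery.zen.publish_timeout", "45s"))
    .get();
----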
[float]
[[no-master-block]]
@ -143,10 +213,14 @@ rejected when there is no active master.
The `discovery.zen.no_master_block` setting has two valid options:
[horizontal]
`all`:: All operations on the node--i.e. both reads and writes--will be
rejected. This also applies to api calls that read or write the cluster state,
such as the get index settings, put mapping and cluster state apis.
`write`:: (default) Write operations will be rejected. Read operations will
succeed, based on the last known cluster configuration. This may result in
partial reads of stale data as this node may be isolated from the rest of the
cluster.
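
A minimal sketch of opting into the stricter block:

[source,java]
----
// Sketch: reject reads as well as writes while no master is elected.
Settings settings = Settings.builder()
    .put("discovery.zen.no_master_block", "all") // default is "write"
    .build();
----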
The `discovery.zen.no_master_block` setting doesn't apply to nodes-based apis
(for example cluster stats, node info and node stats apis). Requests to these
apis will not be blocked and can run on any available node.
Binary file not shown.

View File
@ -1,6 +1,6 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-4.9-all.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-4.10-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionSha256Sum=39e2d5803bbd5eaf6c8efe07067b0e5a00235e8c71318642b2ed262920b27721
distributionSha256Sum=fc049dcbcb245d5892bebae143bd515a78f6a5a93cec99d489b312dc0ce4aad9
View File
@ -255,6 +255,10 @@ public class JarHell {
}
private static void checkClass(Map<String, Path> clazzes, String clazz, Path jarpath) {
if (clazz.equals("module-info") || clazz.endsWith(".module-info")) {
// Ignore jigsaw module descriptions
return;
}
Path previous = clazzes.put(clazz, jarpath);
if (previous != null) {
if (previous.equals(jarpath)) {
View File
@ -76,6 +76,28 @@ public class JarHellTests extends ESTestCase {
}
}
public void testModuleInfo() throws Exception {
Path dir = createTempDir();
JarHell.checkJarHell(
asSet(
makeJar(dir, "foo.jar", null, "module-info.class"),
makeJar(dir, "bar.jar", null, "module-info.class")
),
logger::debug
);
}
public void testModuleInfoPackage() throws Exception {
Path dir = createTempDir();
JarHell.checkJarHell(
asSet(
makeJar(dir, "foo.jar", null, "foo/bar/module-info.class"),
makeJar(dir, "bar.jar", null, "foo/bar/module-info.class")
),
logger::debug
);
}
public void testDirsOnClasspath() throws Exception {
Path dir1 = createTempDir();
Path dir2 = createTempDir();
View File
@ -28,6 +28,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.elasticsearch.script.ScriptService;
import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException;
import static org.elasticsearch.ingest.ConfigurationUtils.readBooleanProperty;
@ -96,6 +97,13 @@ public final class ForEachProcessor extends AbstractProcessor {
}
public static final class Factory implements Processor.Factory {
private final ScriptService scriptService;
Factory(ScriptService scriptService) {
this.scriptService = scriptService;
}
@Override
public ForEachProcessor create(Map<String, Processor.Factory> factories, String tag,
Map<String, Object> config) throws Exception {
@ -107,7 +115,8 @@ public final class ForEachProcessor extends AbstractProcessor {
throw newConfigurationException(TYPE, tag, "processor", "Must specify exactly one processor type");
}
Map.Entry<String, Map<String, Object>> entry = entries.iterator().next();
Processor processor = ConfigurationUtils.readProcessor(factories, entry.getKey(), entry.getValue());
Processor processor =
ConfigurationUtils.readProcessor(factories, scriptService, entry.getKey(), entry.getValue());
return new ForEachProcessor(tag, field, processor, ignoreMissing);
}
}
View File
@ -72,7 +72,7 @@ public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPl
processors.put(ConvertProcessor.TYPE, new ConvertProcessor.Factory());
processors.put(GsubProcessor.TYPE, new GsubProcessor.Factory());
processors.put(FailProcessor.TYPE, new FailProcessor.Factory(parameters.scriptService));
processors.put(ForEachProcessor.TYPE, new ForEachProcessor.Factory());
processors.put(ForEachProcessor.TYPE, new ForEachProcessor.Factory(parameters.scriptService));
processors.put(DateIndexNameProcessor.TYPE, new DateIndexNameProcessor.Factory(parameters.scriptService));
processors.put(SortProcessor.TYPE, new SortProcessor.Factory());
processors.put(GrokProcessor.TYPE, new GrokProcessor.Factory(GROK_PATTERNS, createGrokThreadWatchdog(parameters)));
View File
@ -22,6 +22,7 @@ package org.elasticsearch.ingest.common;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ingest.Processor;
import org.elasticsearch.ingest.TestProcessor;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
@ -30,14 +31,17 @@ import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;
public class ForEachProcessorFactoryTests extends ESTestCase {
private final ScriptService scriptService = mock(ScriptService.class);
public void testCreate() throws Exception {
Processor processor = new TestProcessor(ingestDocument -> { });
Map<String, Processor.Factory> registry = new HashMap<>();
registry.put("_name", (r, t, c) -> processor);
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory();
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("field", "_field");
@ -53,7 +57,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase {
Processor processor = new TestProcessor(ingestDocument -> { });
Map<String, Processor.Factory> registry = new HashMap<>();
registry.put("_name", (r, t, c) -> processor);
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory();
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("field", "_field");
@ -71,7 +75,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase {
Map<String, Processor.Factory> registry = new HashMap<>();
registry.put("_first", (r, t, c) -> processor);
registry.put("_second", (r, t, c) -> processor);
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory();
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("field", "_field");
@ -84,7 +88,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase {
}
public void testCreateWithNonExistingProcessorType() throws Exception {
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory();
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("field", "_field");
config.put("processor", Collections.singletonMap("_name", Collections.emptyMap()));
@ -97,7 +101,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase {
Processor processor = new TestProcessor(ingestDocument -> { });
Map<String, Processor.Factory> registry = new HashMap<>();
registry.put("_name", (r, t, c) -> processor);
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory();
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("processor", Collections.singletonList(Collections.singletonMap("_name", Collections.emptyMap())));
Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(registry, null, config));
@ -105,7 +109,7 @@ public class ForEachProcessorFactoryTests extends ESTestCase {
}
public void testCreateWithMissingProcessor() {
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory();
ForEachProcessor.Factory forEachFactory = new ForEachProcessor.Factory(scriptService);
Map<String, Object> config = new HashMap<>();
config.put("field", "_field");
Exception exception = expectThrows(Exception.class, () -> forEachFactory.create(Collections.emptyMap(), null, config));
View File
@ -0,0 +1,81 @@
---
teardown:
- do:
ingest.delete_pipeline:
id: "my_pipeline"
ignore: 404
---
"Test conditional processor fulfilled condition":
- do:
ingest.put_pipeline:
id: "my_pipeline"
body: >
{
"description": "_description",
"processors": [
{
"bytes" : {
"if" : "ctx.conditional_field == 'bar'",
"field" : "bytes_source_field",
"target_field" : "bytes_target_field"
}
}
]
}
- match: { acknowledged: true }
- do:
index:
index: test
type: test
id: 1
pipeline: "my_pipeline"
body: {bytes_source_field: "1kb", conditional_field: "bar"}
- do:
get:
index: test
type: test
id: 1
- match: { _source.bytes_source_field: "1kb" }
- match: { _source.conditional_field: "bar" }
- match: { _source.bytes_target_field: 1024 }
---
"Test conditional processor unfulfilled condition":
- do:
ingest.put_pipeline:
id: "my_pipeline"
body: >
{
"description": "_description",
"processors": [
{
"bytes" : {
"if" : "ctx.conditional_field == 'foo'",
"field" : "bytes_source_field",
"target_field" : "bytes_target_field"
}
}
]
}
- match: { acknowledged: true }
- do:
index:
index: test
type: test
id: 1
pipeline: "my_pipeline"
body: {bytes_source_field: "1kb", conditional_field: "bar"}
- do:
get:
index: test
type: test
id: 1
- match: { _source.bytes_source_field: "1kb" }
- match: { _source.conditional_field: "bar" }
- is_false: _source.bytes_target_field
View File
@ -26,7 +26,7 @@ public class BindingTest {
this.state = state0 + state1;
}
public int testAddWithState(int stateless) {
return stateless + state;
public int testAddWithState(int istateless, double dstateless) {
return istateless + state + (int)dstateless;
}
}
View File
@ -908,7 +908,7 @@ public final class PainlessLookupBuilder {
int methodTypeParametersSize = javaMethod.getParameterCount();
for (int typeParameterIndex = 0; typeParameterIndex < methodTypeParametersSize; ++typeParameterIndex) {
Class<?> typeParameter = typeParameters.get(typeParameterIndex);
Class<?> typeParameter = typeParameters.get(constructorTypeParametersSize + typeParameterIndex);
if (isValidType(typeParameter) == false) {
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " +
View File
@ -177,5 +177,5 @@ class org.elasticsearch.painless.FeatureTest no_import {
# for testing
static {
int testAddWithState(int, int, int) bound_to org.elasticsearch.painless.BindingTest
int testAddWithState(int, int, int, double) bound_to org.elasticsearch.painless.BindingTest
}
View File
@ -28,11 +28,11 @@ import java.util.Map;
public class BindingsTests extends ScriptTestCase {
public void testBasicBinding() {
assertEquals(15, exec("testAddWithState(4, 5, 6)"));
assertEquals(15, exec("testAddWithState(4, 5, 6, 0.0)"));
}
public void testRepeatedBinding() {
String script = "testAddWithState(4, 5, params.test)";
String script = "testAddWithState(4, 5, params.test, 0.0)";
Map<String, Object> params = new HashMap<>();
ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap());
ExecutableScript executableScript = factory.newInstance(params);
@ -48,7 +48,7 @@ public class BindingsTests extends ScriptTestCase {
}
public void testBoundBinding() {
String script = "testAddWithState(4, params.bound, params.test)";
String script = "testAddWithState(4, params.bound, params.test, 0.0)";
Map<String, Object> params = new HashMap<>();
ExecutableScript.Factory factory = scriptEngine.compile(null, script, ExecutableScript.CONTEXT, Collections.emptyMap());
ExecutableScript executableScript = factory.newInstance(params);
View File
@ -19,39 +19,33 @@
package org.elasticsearch.discovery.file;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.transport.TransportService;
import java.nio.file.Path;
import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;
/**
* Plugin for providing file-based unicast hosts discovery. The list of unicast hosts
* is obtained by reading the {@link FileBasedUnicastHostsProvider#UNICAST_HOSTS_FILE} in
* the {@link Environment#configFile()}/discovery-file directory.
*/
public class FileBasedDiscoveryPlugin extends Plugin implements DiscoveryPlugin {
private final Settings settings;
private final Path configPath;
private final DeprecationLogger deprecationLogger;
static final String DEPRECATION_MESSAGE
= "File-based discovery is now built into Elasticsearch and does not require the discovery-file plugin";
public FileBasedDiscoveryPlugin(Settings settings, Path configPath) {
this.settings = settings;
this.configPath = configPath;
public FileBasedDiscoveryPlugin(Settings settings) {
deprecationLogger = new DeprecationLogger(Loggers.getLogger(this.getClass(), settings));
}
@Override
public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(TransportService transportService,
NetworkService networkService) {
return Collections.singletonMap(
"file",
() -> new FileBasedUnicastHostsProvider(new Environment(settings, configPath)));
deprecationLogger.deprecated(DEPRECATION_MESSAGE);
return Collections.emptyMap();
}
}
View File
@ -1,83 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.discovery.file;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.env.Environment;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* An implementation of {@link UnicastHostsProvider} that reads hosts/ports
* from {@link #UNICAST_HOSTS_FILE}.
*
* Each unicast host/port that is part of the discovery process must be listed on
* a separate line. If the port is left off an entry, a default port of 9300 is
* assumed. An example unicast hosts file could read:
*
* 67.81.244.10
* 67.81.244.11:9305
* 67.81.244.15:9400
*/
class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt";
private final Path unicastHostsFilePath;
FileBasedUnicastHostsProvider(Environment environment) {
super(environment.settings());
this.unicastHostsFilePath = environment.configFile().resolve("discovery-file").resolve(UNICAST_HOSTS_FILE);
}
@Override
public List<TransportAddress> buildDynamicHosts(HostsResolver hostsResolver) {
List<String> hostsList;
try (Stream<String> lines = Files.lines(unicastHostsFilePath)) {
hostsList = lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments
.collect(Collectors.toList());
} catch (FileNotFoundException | NoSuchFileException e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[discovery-file] Failed to find unicast hosts file [{}]",
unicastHostsFilePath), e);
hostsList = Collections.emptyList();
} catch (IOException e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[discovery-file] Error reading unicast hosts file [{}]",
unicastHostsFilePath), e);
hostsList = Collections.emptyList();
}
final List<TransportAddress> dynamicHosts = hostsResolver.resolveHosts(hostsList, 1);
logger.debug("[discovery-file] Using dynamic discovery nodes {}", dynamicHosts);
return dynamicHosts;
}
}
View File
@ -0,0 +1,32 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.discovery.file;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import static org.elasticsearch.discovery.file.FileBasedDiscoveryPlugin.DEPRECATION_MESSAGE;
public class FileBasedDiscoveryPluginDeprecationTests extends ESTestCase {
public void testDeprecationWarning() {
new FileBasedDiscoveryPlugin(Settings.EMPTY).getZenHostsProviders(null, null);
assertWarnings(DEPRECATION_MESSAGE);
}
}
View File
@ -55,8 +55,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private final Set<Alias> aliases = new HashSet<>();
private final Map<String, IndexMetaData.Custom> customs = new HashMap<>();
private final Set<ClusterBlock> blocks = new HashSet<>();
private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;
@ -83,11 +81,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
return this;
}
public CreateIndexClusterStateUpdateRequest customs(Map<String, IndexMetaData.Custom> customs) {
this.customs.putAll(customs);
return this;
}
public CreateIndexClusterStateUpdateRequest blocks(Set<ClusterBlock> blocks) {
this.blocks.addAll(blocks);
return this;
@ -146,10 +139,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
return aliases;
}
public Map<String, IndexMetaData.Custom> customs() {
return customs;
}
public Set<ClusterBlock> blocks() {
return blocks;
}
View File
@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
@ -58,9 +57,9 @@ import java.util.Objects;
import java.util.Set;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
/**
* A request to create an index. Best created with {@link org.elasticsearch.client.Requests#createIndexRequest(String)}.
@ -87,8 +86,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
private final Set<Alias> aliases = new HashSet<>();
private final Map<String, IndexMetaData.Custom> customs = new HashMap<>();
private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;
public CreateIndexRequest() {
@ -388,18 +385,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
} else if (ALIASES.match(name, deprecationHandler)) {
aliases((Map<String, Object>) entry.getValue());
} else {
// maybe custom?
IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name);
if (proto != null) {
try {
customs.put(name, proto.fromMap((Map<String, Object>) entry.getValue()));
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parse custom metadata for [{}]", name);
}
} else {
// found a key which is neither custom defined nor one of the supported ones
throw new ElasticsearchParseException("unknown key [{}] for create index", name);
}
throw new ElasticsearchParseException("unknown key [{}] for create index", name);
}
}
return this;
@ -413,18 +399,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
return this.aliases;
}
/**
* Adds custom metadata to the index to be created.
*/
public CreateIndexRequest custom(IndexMetaData.Custom custom) {
customs.put(custom.type(), custom);
return this;
}
public Map<String, IndexMetaData.Custom> customs() {
return this.customs;
}
public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}
@ -474,11 +448,13 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
}
mappings.put(type, source);
}
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in);
customs.put(type, customIndexMetaData);
if (in.getVersion().before(Version.V_6_5_0)) {
// This used to be the size of custom metadata classes
int customSize = in.readVInt();
assert customSize == 0 : "unexpected custom metadata when none is supported";
if (customSize > 0) {
throw new IllegalStateException("unexpected custom metadata when none is supported");
}
}
int aliasesSize = in.readVInt();
for (int i = 0; i < aliasesSize; i++) {
@ -501,10 +477,9 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
out.writeString(entry.getKey());
out.writeString(entry.getValue());
}
out.writeVInt(customs.size());
for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
if (out.getVersion().before(Version.V_6_5_0)) {
// Size of custom index metadata, which is removed
out.writeVInt(0);
}
out.writeVInt(aliases.size());
for (Alias alias : aliases) {
@ -542,10 +517,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
alias.toXContent(builder, params);
}
builder.endObject();
for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
builder.field(entry.getKey(), entry.getValue(), params);
}
return builder;
}
}
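
The version-gated reads and writes above follow a common wire-compatibility
pattern. A hedged, standalone sketch of that pattern, with `removedIn`
standing in for `Version.V_6_5_0` and illustrative helper names not taken
from this commit:

[source,java]
----
import java.io.IOException;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

class LegacyCustomsWireFormat {
    // When writing to an older node, keep the old shape on the wire: an empty map.
    static void writeLegacyCustoms(StreamOutput out, Version removedIn) throws IOException {
        if (out.getVersion().before(removedIn)) {
            out.writeVInt(0); // size of the removed custom-metadata map, always empty now
        }
    }

    // When reading from an older node, consume the count but reject real entries,
    // since nothing can interpret them anymore.
    static void readLegacyCustoms(StreamInput in, Version removedIn) throws IOException {
        if (in.getVersion().before(removedIn)) {
            int customSize = in.readVInt();
            if (customSize > 0) {
                throw new IllegalStateException("unexpected custom metadata when none is supported");
            }
        }
    }
}
----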
View File
@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
@ -224,14 +223,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
return this;
}
/**
* Adds custom metadata to the index to be created.
*/
public CreateIndexRequestBuilder addCustom(IndexMetaData.Custom custom) {
request.custom(custom);
return this;
}
/**
* Sets the settings and mappings as a single source.
*/
View File
@ -75,7 +75,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index())
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.settings(request.settings()).mappings(request.mappings())
.aliases(request.aliases()).customs(request.customs())
.aliases(request.aliases())
.waitForActiveShards(request.waitForActiveShards());
createIndexService.createIndex(updateRequest, ActionListener.wrap(response ->
View File
@ -185,7 +185,6 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeReque
.masterNodeTimeout(targetIndex.masterNodeTimeout())
.settings(targetIndex.settings())
.aliases(targetIndex.aliases())
.customs(targetIndex.customs())
.waitForActiveShards(targetIndex.waitForActiveShards())
.recoverFrom(metaData.getIndex())
.resizeType(resizeRequest.getResizeType())
View File
@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@ -61,9 +60,9 @@ import java.util.Set;
import java.util.stream.Collectors;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
/**
* A request to create an index template.
@ -88,8 +87,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
private final Set<Alias> aliases = new HashSet<>();
private Map<String, IndexMetaData.Custom> customs = new HashMap<>();
private Integer version;
public PutIndexTemplateRequest() {
@ -353,15 +350,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
} else if (name.equals("aliases")) {
aliases((Map<String, Object>) entry.getValue());
} else {
// maybe custom?
IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name);
if (proto != null) {
try {
customs.put(name, proto.fromMap((Map<String, Object>) entry.getValue()));
} catch (IOException e) {
throw new ElasticsearchParseException("failed to parse custom metadata for [{}]", name);
}
}
throw new ElasticsearchParseException("unknown key [{}] in the template ", name);
}
}
return this;
@ -395,15 +384,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
return source(XContentHelper.convertToMap(source, true, xContentType).v2());
}
public PutIndexTemplateRequest custom(IndexMetaData.Custom custom) {
customs.put(custom.type(), custom);
return this;
}
public Map<String, IndexMetaData.Custom> customs() {
return this.customs;
}
public Set<Alias> aliases() {
return this.aliases;
}
@ -494,11 +474,13 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
String mappingSource = in.readString();
mappings.put(type, mappingSource);
}
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in);
customs.put(type, customIndexMetaData);
if (in.getVersion().before(Version.V_6_5_0)) {
// Used to be used for custom index metadata
int customSize = in.readVInt();
assert customSize == 0 : "expected not to have any custom metadata";
if (customSize > 0) {
throw new IllegalStateException("unexpected custom metadata when none is supported");
}
}
int aliasesSize = in.readVInt();
for (int i = 0; i < aliasesSize; i++) {
@ -525,10 +507,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
out.writeString(entry.getKey());
out.writeString(entry.getValue());
}
out.writeVInt(customs.size());
for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
if (out.getVersion().before(Version.V_6_5_0)) {
out.writeVInt(0);
}
out.writeVInt(aliases.size());
for (Alias alias : aliases) {
@ -565,10 +545,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
}
builder.endObject();
for (Map.Entry<String, IndexMetaData.Custom> entry : customs.entrySet()) {
builder.field(entry.getKey(), entry.getValue(), params);
}
return builder;
}
}
View File
@ -84,7 +84,6 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
.settings(templateSettingsBuilder.build())
.mappings(request.mappings())
.aliases(request.aliases())
.customs(request.customs())
.create(request.create())
.masterTimeout(request.masterNodeTimeout())
.version(request.version()),
View File
@ -166,14 +166,14 @@ public class FieldCapabilities implements Writeable, ToXContentObject {
}
/**
* Whether this field is indexed for search on all indices.
* Whether this field can be aggregated on all indices.
*/
public boolean isAggregatable() {
return isAggregatable;
}
/**
* Whether this field can be aggregated on all indices.
* Whether this field is indexed for search on all indices.
*/
public boolean isSearchable() {
return isSearchable;
View File
@ -111,7 +111,6 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
}
/**
* The list of indices to lookup
*/
public FieldCapabilitiesRequest indices(String... indices) {
View File
@ -35,6 +35,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
/**
@ -56,15 +57,15 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
private FieldCapabilitiesResponse(Map<String, Map<String, FieldCapabilities>> responseMap,
List<FieldCapabilitiesIndexResponse> indexResponses) {
this.responseMap = responseMap;
this.indexResponses = indexResponses;
this.responseMap = Objects.requireNonNull(responseMap);
this.indexResponses = Objects.requireNonNull(indexResponses);
}
/**
* Used for serialization
*/
FieldCapabilitiesResponse() {
this.responseMap = Collections.emptyMap();
this(Collections.emptyMap(), Collections.emptyList());
}
/**
@ -81,6 +82,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
List<FieldCapabilitiesIndexResponse> getIndexResponses() {
return indexResponses;
}
/**
*
* Get the field capabilities per type for the provided {@code field}.
View File
@ -90,7 +90,7 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
}
};
if (totalNumRequest == 0) {
listener.onResponse(new FieldCapabilitiesResponse());
listener.onResponse(new FieldCapabilitiesResponse(Collections.emptyMap()));
} else {
ActionListener<FieldCapabilitiesIndexResponse> innerListener = new ActionListener<FieldCapabilitiesIndexResponse>() {
@Override
View File
@ -171,9 +171,11 @@ public class SimulatePipelineRequest extends ActionRequest implements ToXContent
return new Parsed(pipeline, ingestDocumentList, verbose);
}
static Parsed parse(Map<String, Object> config, boolean verbose, IngestService pipelineStore) throws Exception {
static Parsed parse(Map<String, Object> config, boolean verbose, IngestService ingestService) throws Exception {
Map<String, Object> pipelineConfig = ConfigurationUtils.readMap(null, null, config, Fields.PIPELINE);
Pipeline pipeline = Pipeline.create(SIMULATED_PIPELINE_ID, pipelineConfig, pipelineStore.getProcessorFactories());
Pipeline pipeline = Pipeline.create(
SIMULATED_PIPELINE_ID, pipelineConfig, ingestService.getProcessorFactories(), ingestService.getScriptService()
);
List<IngestDocument> ingestDocumentList = parseDocs(config);
return new Parsed(pipeline, ingestDocumentList, verbose);
}
View File
@ -455,7 +455,7 @@ public interface Client extends ElasticsearchClient, Releasable {
/**
* Builder for the field capabilities request.
*/
FieldCapabilitiesRequestBuilder prepareFieldCaps();
FieldCapabilitiesRequestBuilder prepareFieldCaps(String... indices);
/**
* An action that returns the field capabilities from the provided request
View File
@ -651,8 +651,8 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}
@Override
public FieldCapabilitiesRequestBuilder prepareFieldCaps() {
return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE);
public FieldCapabilitiesRequestBuilder prepareFieldCaps(String... indices) {
return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE, indices);
}
static class Admin implements AdminClient {
View File
@ -0,0 +1,188 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.metadata;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
* This is a {@code Map<String, String>} that implements {@link Diffable} so it
* can be used for cluster state purposes
*/
public class DiffableStringMap extends AbstractMap<String, String> implements Diffable<DiffableStringMap> {
private final Map<String, String> innerMap;
DiffableStringMap(final Map<String, String> map) {
this.innerMap = map;
}
@SuppressWarnings("unchecked")
DiffableStringMap(final StreamInput in) throws IOException {
this.innerMap = (Map<String, String>) (Map) in.readMap();
}
@Override
public String put(String key, String value) {
return innerMap.put(key, value);
}
@Override
public Set<Entry<String, String>> entrySet() {
return innerMap.entrySet();
}
@Override
@SuppressWarnings("unchecked")
public void writeTo(StreamOutput out) throws IOException {
out.writeMap((Map<String, Object>) (Map) innerMap);
}
@Override
public Diff<DiffableStringMap> diff(DiffableStringMap previousState) {
return new DiffableStringMapDiff(previousState, this);
}
public static Diff<DiffableStringMap> readDiffFrom(StreamInput in) throws IOException {
return new DiffableStringMapDiff(in);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (obj instanceof DiffableStringMap) {
DiffableStringMap other = (DiffableStringMap) obj;
return innerMap.equals(other.innerMap);
} else if (obj instanceof Map) {
Map other = (Map) obj;
return innerMap.equals(other);
} else {
return false;
}
}
@Override
public int hashCode() {
return innerMap.hashCode();
}
@Override
public String toString() {
return "DiffableStringMap[" + innerMap.toString() + "]";
}
/**
* Represents differences between two DiffableStringMaps.
*/
public static class DiffableStringMapDiff implements Diff<DiffableStringMap> {
private final List<String> deletes;
private final Map<String, String> upserts; // diffs also become upserts
private DiffableStringMapDiff(DiffableStringMap before, DiffableStringMap after) {
final List<String> tempDeletes = new ArrayList<>();
final Map<String, String> tempUpserts = new HashMap<>();
for (String key : before.keySet()) {
if (after.containsKey(key) == false) {
tempDeletes.add(key);
}
}
for (Map.Entry<String, String> partIter : after.entrySet()) {
String beforePart = before.get(partIter.getKey());
if (beforePart == null) {
tempUpserts.put(partIter.getKey(), partIter.getValue());
} else if (partIter.getValue().equals(beforePart) == false) {
tempUpserts.put(partIter.getKey(), partIter.getValue());
}
}
deletes = tempDeletes;
upserts = tempUpserts;
}
private DiffableStringMapDiff(StreamInput in) throws IOException {
deletes = new ArrayList<>();
upserts = new HashMap<>();
int deletesCount = in.readVInt();
for (int i = 0; i < deletesCount; i++) {
deletes.add(in.readString());
}
int upsertsCount = in.readVInt();
for (int i = 0; i < upsertsCount; i++) {
String key = in.readString();
String newValue = in.readString();
upserts.put(key, newValue);
}
}
public List<String> getDeletes() {
return deletes;
}
public Map<String, Diff<String>> getDiffs() {
return Collections.emptyMap();
}
public Map<String, String> getUpserts() {
return upserts;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(deletes.size());
for (String delete : deletes) {
out.writeString(delete);
}
out.writeVInt(upserts.size());
for (Map.Entry<String, String> entry : upserts.entrySet()) {
out.writeString(entry.getKey());
out.writeString(entry.getValue());
}
}
@Override
public DiffableStringMap apply(DiffableStringMap part) {
Map<String, String> builder = new HashMap<>(part.innerMap);
List<String> deletes = getDeletes();
for (String delete : deletes) {
builder.remove(delete);
}
assert getDiffs().size() == 0 : "there should never be diffs for DiffableStringMap";
for (Map.Entry<String, String> upsert : upserts.entrySet()) {
builder.put(upsert.getKey(), upsert.getValue());
}
return new DiffableStringMap(builder);
}
}
}
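
A short usage sketch (not part of this commit, and assuming same-package
access, since the constructor is package-private) showing how a diff produced
by this class round-trips:

[source,java]
----
import java.util.HashMap;
import java.util.Map;

class DiffableStringMapExample {
    static void roundTrip() {
        Map<String, String> before = new HashMap<>();
        before.put("owner", "team-a");
        before.put("stage", "hot");

        Map<String, String> after = new HashMap<>(before);
        after.remove("stage");        // recorded as a delete in the diff
        after.put("owner", "team-b"); // recorded as an upsert in the diff

        DiffableStringMap prev = new DiffableStringMap(before);
        DiffableStringMap next = new DiffableStringMap(after);

        // diff(previousState) captures the changes from prev to next;
        // applying it to prev reconstructs next.
        DiffableStringMap rebuilt = next.diff(prev).apply(prev);
        assert rebuilt.equals(next);
    }
}
----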
View File
@ -23,7 +23,6 @@ import com.carrotsearch.hppc.LongArrayList;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.Assertions;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
@ -65,7 +64,6 @@ import java.time.ZonedDateTime;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Locale;
@ -81,59 +79,6 @@ import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragment {
/**
* This class will be removed in v7.0
*/
@Deprecated
public interface Custom extends Diffable<Custom>, ToXContent {
String type();
Custom fromMap(Map<String, Object> map) throws IOException;
Custom fromXContent(XContentParser parser) throws IOException;
/**
* Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput
*/
Diff<Custom> readDiffFrom(StreamInput in) throws IOException;
/**
* Reads an object of this type from the provided {@linkplain StreamInput}. The receiving instance remains unchanged.
*/
Custom readFrom(StreamInput in) throws IOException;
/**
* Merges from this to another, with this being more important, i.e., if something exists in this and another,
* this will prevail.
*/
Custom mergeWith(Custom another);
}
public static Map<String, Custom> customPrototypes = new HashMap<>();
/**
* Register a custom index meta data factory. Make sure to call it from a static block.
*/
public static void registerPrototype(String type, Custom proto) {
customPrototypes.put(type, proto);
}
@Nullable
public static <T extends Custom> T lookupPrototype(String type) {
//noinspection unchecked
return (T) customPrototypes.get(type);
}
public static <T extends Custom> T lookupPrototypeSafe(String type) {
//noinspection unchecked
T proto = (T) customPrototypes.get(type);
if (proto == null) {
throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]");
}
return proto;
}
public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ));
public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE));
@ -324,7 +269,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
private final ImmutableOpenMap<String, MappingMetaData> mappings;
private final ImmutableOpenMap<String, Custom> customs;
private final ImmutableOpenMap<String, DiffableStringMap> customData;
private final ImmutableOpenIntMap<Set<String>> inSyncAllocationIds;
@ -343,7 +288,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
private IndexMetaData(Index index, long version, long mappingVersion, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings,
ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases,
ImmutableOpenMap<String, Custom> customs, ImmutableOpenIntMap<Set<String>> inSyncAllocationIds,
ImmutableOpenMap<String, DiffableStringMap> customData, ImmutableOpenIntMap<Set<String>> inSyncAllocationIds,
DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters,
Version indexCreatedVersion, Version indexUpgradedVersion,
int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, ImmutableOpenMap<String, RolloverInfo> rolloverInfos) {
@ -360,7 +305,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1);
this.settings = settings;
this.mappings = mappings;
this.customs = customs;
this.customData = customData;
this.aliases = aliases;
this.inSyncAllocationIds = inSyncAllocationIds;
this.requireFilters = requireFilters;
@ -485,22 +430,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
return mappings.get(mappingType);
}
// we keep the shrink settings for BWC - this can be removed in 8.0
// we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 6.0
public static final String INDEX_SHRINK_SOURCE_UUID_KEY = "index.shrink.source.uuid";
public static final String INDEX_SHRINK_SOURCE_NAME_KEY = "index.shrink.source.name";
public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid";
public static final String INDEX_RESIZE_SOURCE_NAME_KEY = "index.resize.source.name";
public static final Setting<String> INDEX_SHRINK_SOURCE_UUID = Setting.simpleString(INDEX_SHRINK_SOURCE_UUID_KEY);
public static final Setting<String> INDEX_SHRINK_SOURCE_NAME = Setting.simpleString(INDEX_SHRINK_SOURCE_NAME_KEY);
public static final Setting<String> INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY,
INDEX_SHRINK_SOURCE_UUID);
public static final Setting<String> INDEX_RESIZE_SOURCE_NAME = Setting.simpleString(INDEX_RESIZE_SOURCE_NAME_KEY,
INDEX_SHRINK_SOURCE_NAME);
public static final Setting<String> INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY);
public static final Setting<String> INDEX_RESIZE_SOURCE_NAME = Setting.simpleString(INDEX_RESIZE_SOURCE_NAME_KEY);
public Index getResizeSourceIndex() {
return INDEX_RESIZE_SOURCE_UUID.exists(settings) || INDEX_SHRINK_SOURCE_UUID.exists(settings)
? new Index(INDEX_RESIZE_SOURCE_NAME.get(settings), INDEX_RESIZE_SOURCE_UUID.get(settings)) : null;
return INDEX_RESIZE_SOURCE_UUID.exists(settings) ? new Index(INDEX_RESIZE_SOURCE_NAME.get(settings),
INDEX_RESIZE_SOURCE_UUID.get(settings)) : null;
}
/**
@ -519,13 +456,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
return mappings.get(MapperService.DEFAULT_MAPPING);
}
public ImmutableOpenMap<String, Custom> getCustoms() {
return this.customs;
ImmutableOpenMap<String, DiffableStringMap> getCustomData() {
return this.customData;
}
@SuppressWarnings("unchecked")
public <T extends Custom> T custom(String type) {
return (T) customs.get(type);
public Map<String, String> getCustomData(final String key) {
return Collections.unmodifiableMap(this.customData.get(key));
}
public ImmutableOpenIntMap<Set<String>> getInSyncAllocationIds() {
@ -591,7 +527,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
if (state != that.state) {
return false;
}
if (!customs.equals(that.customs)) {
if (!customData.equals(that.customData)) {
return false;
}
if (routingNumShards != that.routingNumShards) {
@ -620,7 +556,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
result = 31 * result + aliases.hashCode();
result = 31 * result + settings.hashCode();
result = 31 * result + mappings.hashCode();
result = 31 * result + customs.hashCode();
result = 31 * result + customData.hashCode();
result = 31 * result + Long.hashCode(routingFactor);
result = 31 * result + Long.hashCode(routingNumShards);
result = 31 * result + Arrays.hashCode(primaryTerms);
@ -660,7 +596,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
private final Settings settings;
private final Diff<ImmutableOpenMap<String, MappingMetaData>> mappings;
private final Diff<ImmutableOpenMap<String, AliasMetaData>> aliases;
private final Diff<ImmutableOpenMap<String, Custom>> customs;
private final Diff<ImmutableOpenMap<String, DiffableStringMap>> customData;
private final Diff<ImmutableOpenIntMap<Set<String>>> inSyncAllocationIds;
private final Diff<ImmutableOpenMap<String, RolloverInfo>> rolloverInfos;
@ -674,7 +610,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
primaryTerms = after.primaryTerms;
mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
customs = DiffableUtils.diff(before.customs, after.customs, DiffableUtils.getStringKeySerializer());
customData = DiffableUtils.diff(before.customData, after.customData, DiffableUtils.getStringKeySerializer());
inSyncAllocationIds = DiffableUtils.diff(before.inSyncAllocationIds, after.inSyncAllocationIds,
DiffableUtils.getVIntKeySerializer(), DiffableUtils.StringSetValueSerializer.getInstance());
rolloverInfos = DiffableUtils.diff(before.rolloverInfos, after.rolloverInfos, DiffableUtils.getStringKeySerializer());
@ -696,18 +632,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
MappingMetaData::readDiffFrom);
aliases = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), AliasMetaData::new,
AliasMetaData::readDiffFrom);
customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(),
new DiffableUtils.DiffableValueSerializer<String, Custom>() {
@Override
public Custom read(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readFrom(in);
}
@Override
public Diff<Custom> readDiff(StreamInput in, String key) throws IOException {
return lookupPrototypeSafe(key).readDiffFrom(in);
}
});
customData = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), DiffableStringMap::new,
DiffableStringMap::readDiffFrom);
inSyncAllocationIds = DiffableUtils.readImmutableOpenIntMapDiff(in, DiffableUtils.getVIntKeySerializer(),
DiffableUtils.StringSetValueSerializer.getInstance());
if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
@ -732,7 +658,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
out.writeVLongArray(primaryTerms);
mappings.writeTo(out);
aliases.writeTo(out);
customs.writeTo(out);
customData.writeTo(out);
inSyncAllocationIds.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
rolloverInfos.writeTo(out);
@ -750,7 +676,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
builder.primaryTerms(primaryTerms);
builder.mappings.putAll(mappings.apply(part.mappings));
builder.aliases.putAll(aliases.apply(part.aliases));
builder.customs.putAll(customs.apply(part.customs));
builder.customMetaData.putAll(customData.apply(part.customData));
builder.inSyncAllocationIds.putAll(inSyncAllocationIds.apply(part.inSyncAllocationIds));
builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos));
return builder.build();
@ -780,10 +706,17 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
builder.putAlias(aliasMd);
}
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
for (int i = 0; i < customSize; i++) {
String key = in.readString();
DiffableStringMap custom = new DiffableStringMap(in);
builder.putCustom(key, custom);
}
} else {
assert customSize == 0 : "expected no custom index metadata";
if (customSize > 0) {
throw new IllegalStateException("unexpected custom metadata when none is supported");
}
}
int inSyncAllocationIdsSize = in.readVInt();
for (int i = 0; i < inSyncAllocationIdsSize; i++) {
@ -819,10 +752,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
for (ObjectCursor<AliasMetaData> cursor : aliases.values()) {
cursor.value.writeTo(out);
}
out.writeVInt(customs.size());
for (ObjectObjectCursor<String, Custom> cursor : customs) {
out.writeString(cursor.key);
cursor.value.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
out.writeVInt(customData.size());
for (final ObjectObjectCursor<String, DiffableStringMap> cursor : customData) {
out.writeString(cursor.key);
cursor.value.writeTo(out);
}
} else {
out.writeVInt(0);
}
out.writeVInt(inSyncAllocationIds.size());
for (IntObjectCursor<Set<String>> cursor : inSyncAllocationIds) {
@ -855,7 +792,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
private Settings settings = Settings.Builder.EMPTY_SETTINGS;
private final ImmutableOpenMap.Builder<String, MappingMetaData> mappings;
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
private final ImmutableOpenMap.Builder<String, Custom> customs;
private final ImmutableOpenMap.Builder<String, DiffableStringMap> customMetaData;
private final ImmutableOpenIntMap.Builder<Set<String>> inSyncAllocationIds;
private final ImmutableOpenMap.Builder<String, RolloverInfo> rolloverInfos;
private Integer routingNumShards;
@ -864,7 +801,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
this.index = index;
this.mappings = ImmutableOpenMap.builder();
this.aliases = ImmutableOpenMap.builder();
this.customs = ImmutableOpenMap.builder();
this.customMetaData = ImmutableOpenMap.builder();
this.inSyncAllocationIds = ImmutableOpenIntMap.builder();
this.rolloverInfos = ImmutableOpenMap.builder();
}
@ -878,7 +815,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
this.primaryTerms = indexMetaData.primaryTerms.clone();
this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
this.customMetaData = ImmutableOpenMap.builder(indexMetaData.customData);
this.routingNumShards = indexMetaData.routingNumShards;
this.inSyncAllocationIds = ImmutableOpenIntMap.builder(indexMetaData.inSyncAllocationIds);
this.rolloverInfos = ImmutableOpenMap.builder(indexMetaData.rolloverInfos);
@ -1008,8 +945,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
return this;
}
public Builder putCustom(String type, Custom customIndexMetaData) {
this.customs.put(type, customIndexMetaData);
public Builder putCustom(String type, Map<String, String> customIndexMetaData) {
this.customMetaData.put(type, new DiffableStringMap(customIndexMetaData));
return this;
}
@ -1177,7 +1114,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
return new IndexMetaData(new Index(index, uuid), version, mappingVersion, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(),
tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
tmpAliases.build(), customMetaData.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters,
indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, rolloverInfos.build());
}
@ -1205,10 +1142,9 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
}
builder.endArray();
for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.getCustoms()) {
builder.startObject(cursor.key);
cursor.value.toXContent(builder, params);
builder.endObject();
for (ObjectObjectCursor<String, DiffableStringMap> cursor : indexMetaData.customData) {
builder.field(cursor.key);
builder.map(cursor.value);
}
builder.startObject(KEY_ALIASES);
@ -1317,15 +1253,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
assert Version.CURRENT.major <= 5;
parser.skipChildren();
} else {
// check if its a custom index metadata
Custom proto = lookupPrototype(currentFieldName);
if (proto == null) {
//TODO warn
parser.skipChildren();
} else {
Custom custom = proto.fromXContent(parser);
builder.putCustom(custom.type(), custom);
}
// assume it's custom index metadata
builder.putCustom(currentFieldName, parser.mapStrings());
}
} else if (token == XContentParser.Token.START_ARRAY) {
if (KEY_MAPPINGS.equals(currentFieldName)) {
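Editor's note: the hunks above replace the pluggable IndexMetaData.Custom extension point with plain string-to-string maps wrapped in DiffableStringMap. A minimal sketch of the new builder call; the "ccr" type name and map keys are illustrative only, not real metadata:

import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.cluster.metadata.IndexMetaData;

final class CustomMetaDataSketch {
    // Hypothetical type and keys; any string map can be attached per type.
    static IndexMetaData.Builder withCustom(IndexMetaData.Builder builder) {
        Map<String, String> custom = new HashMap<>();
        custom.put("leader_index_uuid", "abc123");
        return builder.putCustom("ccr", custom); // wrapped in a DiffableStringMap internally
    }
}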

View File

@ -20,7 +20,7 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.Diff;
@ -87,13 +87,10 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
private final ImmutableOpenMap<String, AliasMetaData> aliases;
private final ImmutableOpenMap<String, IndexMetaData.Custom> customs;
public IndexTemplateMetaData(String name, int order, Integer version,
List<String> patterns, Settings settings,
ImmutableOpenMap<String, CompressedXContent> mappings,
ImmutableOpenMap<String, AliasMetaData> aliases,
ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
ImmutableOpenMap<String, AliasMetaData> aliases) {
if (patterns == null || patterns.isEmpty()) {
throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns);
}
@ -104,7 +101,6 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
this.settings = settings;
this.mappings = mappings;
this.aliases = aliases;
this.customs = customs;
}
public String name() {
@ -165,19 +161,6 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
return this.aliases;
}
public ImmutableOpenMap<String, IndexMetaData.Custom> customs() {
return this.customs;
}
public ImmutableOpenMap<String, IndexMetaData.Custom> getCustoms() {
return this.customs;
}
@SuppressWarnings("unchecked")
public <T extends IndexMetaData.Custom> T custom(String type) {
return (T) customs.get(type);
}
public static Builder builder(String name) {
return new Builder(name);
}
@ -227,11 +210,13 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
AliasMetaData aliasMd = new AliasMetaData(in);
builder.putAlias(aliasMd);
}
int customSize = in.readVInt();
for (int i = 0; i < customSize; i++) {
String type = in.readString();
IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in);
builder.putCustom(type, customIndexMetaData);
if (in.getVersion().before(Version.V_6_5_0)) {
// Previously we allowed custom metadata
int customSize = in.readVInt();
assert customSize == 0 : "expected no custom metadata";
if (customSize > 0) {
throw new IllegalStateException("unexpected custom metadata when none is supported");
}
}
builder.version(in.readOptionalVInt());
return builder.build();
@ -260,10 +245,8 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
for (ObjectCursor<AliasMetaData> cursor : aliases.values()) {
cursor.value.writeTo(out);
}
out.writeVInt(customs.size());
for (ObjectObjectCursor<String, IndexMetaData.Custom> cursor : customs) {
out.writeString(cursor.key);
cursor.value.writeTo(out);
if (out.getVersion().before(Version.V_6_5_0)) {
out.writeVInt(0);
}
out.writeOptionalVInt(version);
}
@ -272,9 +255,6 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
private static final Set<String> VALID_FIELDS = Sets.newHashSet(
"template", "order", "mappings", "settings", "index_patterns", "aliases", "version");
static {
VALID_FIELDS.addAll(IndexMetaData.customPrototypes.keySet());
}
private String name;
@ -290,13 +270,10 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
private final ImmutableOpenMap.Builder<String, AliasMetaData> aliases;
private final ImmutableOpenMap.Builder<String, IndexMetaData.Custom> customs;
public Builder(String name) {
this.name = name;
mappings = ImmutableOpenMap.builder();
aliases = ImmutableOpenMap.builder();
customs = ImmutableOpenMap.builder();
}
public Builder(IndexTemplateMetaData indexTemplateMetaData) {
@ -308,7 +285,6 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
mappings = ImmutableOpenMap.builder(indexTemplateMetaData.mappings());
aliases = ImmutableOpenMap.builder(indexTemplateMetaData.aliases());
customs = ImmutableOpenMap.builder(indexTemplateMetaData.customs());
}
public Builder order(int order) {
@ -362,23 +338,8 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
return this;
}
public Builder putCustom(String type, IndexMetaData.Custom customIndexMetaData) {
this.customs.put(type, customIndexMetaData);
return this;
}
public Builder removeCustom(String type) {
this.customs.remove(type);
return this;
}
public IndexMetaData.Custom getCustom(String type) {
return this.customs.get(type);
}
public IndexTemplateMetaData build() {
return new IndexTemplateMetaData(name, order, version, indexPatterns, settings, mappings.build(),
aliases.build(), customs.build());
return new IndexTemplateMetaData(name, order, version, indexPatterns, settings, mappings.build(), aliases.build());
}
public static void toXContent(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params)
@ -425,12 +386,6 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
builder.endArray();
}
for (ObjectObjectCursor<String, IndexMetaData.Custom> cursor : indexTemplateMetaData.customs()) {
builder.startObject(cursor.key);
cursor.value.toXContent(builder, params);
builder.endObject();
}
builder.startObject("aliases");
for (ObjectCursor<AliasMetaData> cursor : indexTemplateMetaData.aliases().values()) {
AliasMetaData.Builder.toXContent(cursor.value, builder, params);
@ -468,15 +423,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
builder.putAlias(AliasMetaData.Builder.fromXContent(parser));
}
} else {
// check if its a custom index metadata
IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(currentFieldName);
if (proto == null) {
//TODO warn
parser.skipChildren();
} else {
IndexMetaData.Custom custom = proto.fromXContent(parser);
builder.putCustom(custom.type(), custom);
}
throw new ElasticsearchParseException("unknown key [{}] for index template", currentFieldName);
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("mappings".equals(currentFieldName)) {

View File

@ -38,7 +38,6 @@ import org.elasticsearch.cluster.ack.CreateIndexClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData.Custom;
import org.elasticsearch.cluster.metadata.IndexMetaData.State;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
@ -287,7 +286,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
List<IndexTemplateMetaData> templates =
MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index());
Map<String, Custom> customs = new HashMap<>();
Map<String, Map<String, String>> customs = new HashMap<>();
// add the request mapping
Map<String, Map<String, Object>> mappings = new HashMap<>();
@ -300,10 +299,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
mappings.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue()));
}
for (Map.Entry<String, Custom> entry : request.customs().entrySet()) {
customs.put(entry.getKey(), entry.getValue());
}
final Index recoverFromIndex = request.recoverFrom();
if (recoverFromIndex == null) {
@ -320,18 +315,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
MapperService.parseMapping(xContentRegistry, mappingString));
}
}
// handle custom
for (ObjectObjectCursor<String, Custom> cursor : template.customs()) {
String type = cursor.key;
IndexMetaData.Custom custom = cursor.value;
IndexMetaData.Custom existing = customs.get(type);
if (existing == null) {
customs.put(type, custom);
} else {
IndexMetaData.Custom merged = existing.mergeWith(custom);
customs.put(type, merged);
}
}
//handle aliases
for (ObjectObjectCursor<String, AliasMetaData> cursor : template.aliases()) {
AliasMetaData aliasMetaData = cursor.value;
@ -519,7 +502,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
indexMetaDataBuilder.putAlias(aliasMetaData);
}
for (Map.Entry<String, Custom> customEntry : customs.entrySet()) {
for (Map.Entry<String, Map<String, String>> customEntry : customs.entrySet()) {
indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue());
}
@ -723,10 +706,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
.put(IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id",
Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray()))
// we only try once and then give up with a shrink index
.put("index.allocation.max_retries", 1)
// we add the legacy way of specifying it here for BWC. We can remove this once it's backported to 6.x
.put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), resizeSourceIndex.getName())
.put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID());
.put("index.allocation.max_retries", 1);
} else if (type == ResizeType.SPLIT) {
validateSplitIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build());
} else {

View File

@ -179,9 +179,6 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
.indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build();
templateBuilder.putAlias(aliasMetaData);
}
for (Map.Entry<String, IndexMetaData.Custom> entry : request.customs.entrySet()) {
templateBuilder.putCustom(entry.getKey(), entry.getValue());
}
IndexTemplateMetaData template = templateBuilder.build();
MetaData.Builder builder = MetaData.builder(currentState.metaData()).put(template);
@ -339,7 +336,6 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
Settings settings = Settings.Builder.EMPTY_SETTINGS;
Map<String, String> mappings = new HashMap<>();
List<Alias> aliases = new ArrayList<>();
Map<String, IndexMetaData.Custom> customs = new HashMap<>();
TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT;
@ -378,11 +374,6 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
return this;
}
public PutRequest customs(Map<String, IndexMetaData.Custom> customs) {
this.customs.putAll(customs);
return this;
}
public PutRequest putMapping(String mappingType, String mappingSource) {
mappings.put(mappingType, mappingSource);
return this;

View File

@ -98,7 +98,7 @@ public class Lucene {
assert annotation == null : "DocValuesFormat " + LATEST_DOC_VALUES_FORMAT + " is deprecated" ;
}
public static final String SOFT_DELETE_FIELD = "__soft_delete";
public static final String SOFT_DELETES_FIELD = "__soft_deletes";
public static final NamedAnalyzer STANDARD_ANALYZER = new NamedAnalyzer("_standard", AnalyzerScope.GLOBAL, new StandardAnalyzer());
public static final NamedAnalyzer KEYWORD_ANALYZER = new NamedAnalyzer("_keyword", AnalyzerScope.GLOBAL, new KeywordAnalyzer());
@ -201,7 +201,7 @@ public class Lucene {
}
final CommitPoint cp = new CommitPoint(si, directory);
try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
.setSoftDeletesField(Lucene.SOFT_DELETE_FIELD)
.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setIndexCommit(cp)
.setCommitOnClose(false)
.setMergePolicy(NoMergePolicy.INSTANCE)
@ -225,7 +225,7 @@ public class Lucene {
}
}
try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
.setSoftDeletesField(Lucene.SOFT_DELETE_FIELD)
.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setMergePolicy(NoMergePolicy.INSTANCE) // no merges
.setCommitOnClose(false) // no commits
.setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append...
@ -884,7 +884,7 @@ public class Lucene {
if (hardLiveDocs == null) {
return new LeafReaderWithLiveDocs(leaf, null, leaf.maxDoc());
}
// TODO: Avoid recalculate numDocs everytime.
// TODO: Can we avoid recalculating numDocs by using SegmentReader#getSegmentInfo with LUCENE-8458?

int numDocs = 0;
for (int i = 0; i < hardLiveDocs.length(); i++) {
if (hardLiveDocs.get(i)) {
@ -910,7 +910,7 @@ public class Lucene {
/**
* Returns a numeric docvalues which can be used to soft-delete documents.
*/
public static NumericDocValuesField newSoftDeleteField() {
return new NumericDocValuesField(SOFT_DELETE_FIELD, 1);
public static NumericDocValuesField newSoftDeletesField() {
return new NumericDocValuesField(SOFT_DELETES_FIELD, 1);
}
}
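Editor's note: the rename from SOFT_DELETE_FIELD to SOFT_DELETES_FIELD touches every IndexWriterConfig in the codebase because Lucene requires all writers on an index to agree on the soft-deletes field. A self-contained sketch of the underlying Lucene API (standalone, not Elasticsearch code):

import java.io.IOException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

final class SoftDeletesSketch {
    static final String SOFT_DELETES_FIELD = "__soft_deletes"; // must match Lucene.SOFT_DELETES_FIELD

    public static void main(String[] args) throws IOException {
        try (Directory dir = new RAMDirectory();
             IndexWriter writer = new IndexWriter(dir,
                 new IndexWriterConfig(new StandardAnalyzer()).setSoftDeletesField(SOFT_DELETES_FIELD))) {
            Document doc = new Document();
            doc.add(new StringField("_id", "1", Field.Store.NO));
            // a soft update marks the previous version with the docvalues field instead of deleting it
            writer.softUpdateDocument(new Term("_id", "1"), doc,
                new NumericDocValuesField(SOFT_DELETES_FIELD, 1));
        }
    }
}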

View File

@ -70,7 +70,7 @@ final class PerThreadIDVersionAndSeqNoLookup {
final Terms terms = reader.terms(uidField);
if (terms == null) {
// If a segment contains only no-ops, it does not have _uid but has both _soft_deletes and _tombstone fields.
final NumericDocValues softDeletesDV = reader.getNumericDocValues(Lucene.SOFT_DELETE_FIELD);
final NumericDocValues softDeletesDV = reader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD);
final NumericDocValues tombstoneDV = reader.getNumericDocValues(SeqNoFieldMapper.TOMBSTONE_NAME);
if (softDeletesDV == null || tombstoneDV == null) {
throw new IllegalArgumentException("reader does not have _uid terms but not a no-op segment; " +

View File

@ -204,8 +204,10 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
case IndexMetaData.SETTING_VERSION_UPGRADED:
case IndexMetaData.SETTING_INDEX_PROVIDED_NAME:
case MergePolicyConfig.INDEX_MERGE_ENABLED:
case IndexMetaData.INDEX_SHRINK_SOURCE_UUID_KEY:
case IndexMetaData.INDEX_SHRINK_SOURCE_NAME_KEY:
// we keep the shrink settings for BWC - this can be removed in 8.0
// we can't remove them in 7 since these settings might be baked into an index coming in via a full cluster restart from 6.0
case "index.shrink.source.uuid":
case "index.shrink.source.name":
case IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY:
case IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY:
return true;

View File

@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.single.SingleNodeDiscovery;
import org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider;
import org.elasticsearch.discovery.zen.SettingsBasedHostsProvider;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ZenDiscovery;
@ -40,6 +41,7 @@ import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@ -69,10 +71,11 @@ public class DiscoveryModule {
public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportService transportService,
NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService, MasterService masterService,
ClusterApplier clusterApplier, ClusterSettings clusterSettings, List<DiscoveryPlugin> plugins,
AllocationService allocationService) {
AllocationService allocationService, Path configFile) {
final Collection<BiConsumer<DiscoveryNode,ClusterState>> joinValidators = new ArrayList<>();
final Map<String, Supplier<UnicastHostsProvider>> hostProviders = new HashMap<>();
hostProviders.put("settings", () -> new SettingsBasedHostsProvider(settings, transportService));
hostProviders.put("file", () -> new FileBasedUnicastHostsProvider(settings, configFile));
for (DiscoveryPlugin plugin : plugins) {
plugin.getZenHostsProviders(transportService, networkService).entrySet().forEach(entry -> {
if (hostProviders.put(entry.getKey(), entry.getValue()) != null) {
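Editor's note: with the file-based provider now registered as a built-in alongside "settings", plugin-supplied providers go through the same map and trip the duplicate check on a name collision. A hedged sketch of a plugin registration (the plugin name and no-op provider are hypothetical; real plugins also extend org.elasticsearch.plugins.Plugin):

import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.transport.TransportService;

public class MyCloudDiscoveryPlugin implements DiscoveryPlugin {
    @Override
    public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders(
            TransportService transportService, NetworkService networkService) {
        // reusing "settings" or "file" as the key would fail the duplicate check above
        return Collections.singletonMap("mycloud",
            () -> hostsResolver -> Collections.emptyList()); // no-op provider for illustration
    }
}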

View File

@ -0,0 +1,92 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.discovery.zen;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* An implementation of {@link UnicastHostsProvider} that reads hosts/ports
* from {@link #UNICAST_HOSTS_FILE}.
*
* Each unicast host/port that is part of the discovery process must be listed on
* a separate line. If the port is left off an entry, a default port of 9300 is
* assumed. An example unicast hosts file could read:
*
* 67.81.244.10
* 67.81.244.11:9305
* 67.81.244.15:9400
*/
public class FileBasedUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
public static final String UNICAST_HOSTS_FILE = "unicast_hosts.txt";
private final Path unicastHostsFilePath;
private final Path legacyUnicastHostsFilePath;
public FileBasedUnicastHostsProvider(Settings settings, Path configFile) {
super(settings);
this.unicastHostsFilePath = configFile.resolve(UNICAST_HOSTS_FILE);
this.legacyUnicastHostsFilePath = configFile.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE);
}
private List<String> getHostsList() {
if (Files.exists(unicastHostsFilePath)) {
return readFileContents(unicastHostsFilePath);
}
if (Files.exists(legacyUnicastHostsFilePath)) {
deprecationLogger.deprecated("Found dynamic hosts list at [{}] but this path is deprecated. This list should be at [{}] " +
"instead. Support for the deprecated path will be removed in future.", legacyUnicastHostsFilePath, unicastHostsFilePath);
return readFileContents(legacyUnicastHostsFilePath);
}
logger.warn("expected, but did not find, a dynamic hosts list at [{}]", unicastHostsFilePath);
return Collections.emptyList();
}
private List<String> readFileContents(Path path) {
try (Stream<String> lines = Files.lines(path)) {
return lines.filter(line -> line.startsWith("#") == false) // lines starting with `#` are comments
.collect(Collectors.toList());
} catch (IOException e) {
logger.warn(() -> new ParameterizedMessage("failed to read file [{}]", unicastHostsFilePath), e);
return Collections.emptyList();
}
}
@Override
public List<TransportAddress> buildDynamicHosts(HostsResolver hostsResolver) {
final List<TransportAddress> transportAddresses = hostsResolver.resolveHosts(getHostsList(), 1);
logger.debug("seed addresses: {}", transportAddresses);
return transportAddresses;
}
}
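Editor's note: since the provider reads unicast_hosts.txt straight out of the config directory, seeding it is just writing that file. A sketch under the assumption of a conventional config path:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;

final class WriteUnicastHosts {
    public static void main(String[] args) throws IOException {
        Path config = Paths.get("/etc/elasticsearch"); // hypothetical config directory
        Files.write(config.resolve("unicast_hosts.txt"),
            Arrays.asList(
                "# one host[:port] per line; port defaults to 9300",
                "67.81.244.10",
                "67.81.244.11:9305"),
            StandardCharsets.UTF_8);
    }
}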

View File

@ -242,7 +242,7 @@ public final class IndexSettings {
* Specifies if the index should use soft-delete instead of hard-delete for update/delete operations.
*/
public static final Setting<Boolean> INDEX_SOFT_DELETES_SETTING =
Setting.boolSetting("index.soft_deletes.enabled", true, Property.IndexScope, Property.Final);
Setting.boolSetting("index.soft_deletes.enabled", false, Property.IndexScope, Property.Final);
/**
* Controls how many soft-deleted documents will be kept around before being merged away. Keeping more deleted
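Editor's note: this merge flips index.soft_deletes.enabled back to false by default, so soft deletes become opt-in at index creation; the setting is final and cannot be toggled afterwards. A minimal sketch of opting in:

import org.elasticsearch.common.settings.Settings;

final class SoftDeletesOptIn {
    // Property.Final: this can only be supplied at index creation time.
    static Settings enableSoftDeletes() {
        return Settings.builder().put("index.soft_deletes.enabled", true).build();
    }
}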

View File

@ -592,6 +592,28 @@ public abstract class Engine implements Closeable {
*/
public abstract Closeable acquireRetentionLockForPeerRecovery();
/**
* Creates a new history snapshot from Lucene for reading operations whose seqno is in the requested seqno range (both bounds inclusive)
*/
public abstract Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService,
long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException;
/**
* Creates a new history snapshot for reading operations since {@code startingSeqNo} (inclusive).
* The returned snapshot can be retrieved from either Lucene index or translog files.
*/
public abstract Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException;
/**
* Returns the estimated number of history operations whose seq# is at least {@code startingSeqNo} (inclusive) in this engine.
*/
public abstract int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException;
/**
* Checks if this engine has every operation since {@code startingSeqNo} (inclusive) in its history (either Lucene or translog)
*/
public abstract boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException;
public abstract TranslogStats getTranslogStats();
/**
@ -599,25 +621,6 @@ public abstract class Engine implements Closeable {
*/
public abstract Translog.Location getTranslogLastWriteLocation();
/**
* Creates a new "translog" snapshot from Lucene for reading operations whose seqno in the requesting seqno range
*/
public abstract Translog.Snapshot newLuceneChangesSnapshot(String source, MapperService mapperService,
long minSeqNo, long maxSeqNo, boolean requiredFullRange) throws IOException;
/**
* Creates a new history snapshot for reading operations since the provided seqno.
* The returned snapshot can be retrieved from either Lucene index or translog files.
*/
public abstract Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException;
/**
* Returns the estimated number of history operations whose seq# at least the provided seq# in this engine.
*/
public abstract int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException;
public abstract boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException;
protected final void ensureOpen(Exception suppressed) {
if (isClosed.get()) {
AlreadyClosedException ace = new AlreadyClosedException(shardId + " engine is closed", failedEngine.get());
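Editor's note: the renamed newChangesSnapshot is the single entry point for reading a seqno range out of Lucene. A hedged sketch of a consumer; the engine and mapperService instances are assumed to come from an open shard:

import java.io.IOException;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.translog.Translog;

final class ChangesReader {
    static long countOps(Engine engine, MapperService mapperService,
                         long fromSeqNo, long toSeqNo) throws IOException {
        long ops = 0;
        // requiredFullRange=true: next() throws IllegalStateException on a seqno gap
        try (Translog.Snapshot snapshot =
                 engine.newChangesSnapshot("example", mapperService, fromSeqNo, toSeqNo, true)) {
            while (snapshot.next() != null) {
                ops++;
            }
        }
        return ops;
    }
}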

View File

@ -149,7 +149,7 @@ public class InternalEngine extends Engine {
private final CounterMetric numDocDeletes = new CounterMetric();
private final CounterMetric numDocAppends = new CounterMetric();
private final CounterMetric numDocUpdates = new CounterMetric();
private final NumericDocValuesField softDeleteField = Lucene.newSoftDeleteField();
private final NumericDocValuesField softDeletesField = Lucene.newSoftDeletesField();
private final boolean softDeleteEnabled;
private final SoftDeletesPolicy softDeletesPolicy;
private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener;
@ -487,7 +487,7 @@ public class InternalEngine extends Engine {
@Override
public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException {
if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
return newLuceneChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false);
return newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false);
} else {
return getTranslog().newSnapshotFromMinSeqNo(startingSeqNo);
}
@ -499,12 +499,8 @@ public class InternalEngine extends Engine {
@Override
public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException {
if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
try (Translog.Snapshot snapshot =
newLuceneChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false)) {
try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false)) {
return snapshot.totalOperations();
} catch (IOException ex) {
maybeFailEngine(source, ex);
throw ex;
}
} else {
return getTranslog().estimateTotalOperationsFromMinSeq(startingSeqNo);
@ -1050,7 +1046,9 @@ public class InternalEngine extends Engine {
private void addStaleDocs(final List<ParseContext.Document> docs, final IndexWriter indexWriter) throws IOException {
assert softDeleteEnabled : "Add history documents but soft-deletes is disabled";
docs.forEach(d -> d.add(softDeleteField));
for (ParseContext.Document doc : docs) {
doc.add(softDeletesField); // soft-delete every document before adding it to Lucene
}
if (docs.size() > 1) {
indexWriter.addDocuments(docs);
} else {
@ -1143,9 +1141,9 @@ public class InternalEngine extends Engine {
private void updateDocs(final Term uid, final List<ParseContext.Document> docs, final IndexWriter indexWriter) throws IOException {
if (softDeleteEnabled) {
if (docs.size() > 1) {
indexWriter.softUpdateDocuments(uid, docs, softDeleteField);
indexWriter.softUpdateDocuments(uid, docs, softDeletesField);
} else {
indexWriter.softUpdateDocument(uid, docs.get(0), softDeleteField);
indexWriter.softUpdateDocument(uid, docs.get(0), softDeletesField);
}
} else {
if (docs.size() > 1) {
@ -1290,11 +1288,11 @@ public class InternalEngine extends Engine {
final ParseContext.Document doc = tombstone.docs().get(0);
assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null :
"Delete tombstone document but _tombstone field is not set [" + doc + " ]";
doc.add(softDeleteField);
doc.add(softDeletesField);
if (plan.addStaleOpToLucene || plan.currentlyDeleted) {
indexWriter.addDocument(doc);
} else {
indexWriter.softUpdateDocument(delete.uid(), doc, softDeleteField);
indexWriter.softUpdateDocument(delete.uid(), doc, softDeletesField);
}
} else if (plan.currentlyDeleted == false) {
// any exception that comes from this is a either an ACE or a fatal exception there
@ -1405,7 +1403,7 @@ public class InternalEngine extends Engine {
final ParseContext.Document doc = tombstone.docs().get(0);
assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null
: "Noop tombstone document but _tombstone field is not set [" + doc + " ]";
doc.add(softDeleteField);
doc.add(softDeletesField);
indexWriter.addDocument(doc);
} catch (Exception ex) {
if (maybeFailEngine("noop", ex)) {
@ -1439,6 +1437,7 @@ public class InternalEngine extends Engine {
// since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
// both refresh types will result in an internal refresh but only the external will also
// pass the new reader reference to the external reader manager.
final long localCheckpointBeforeRefresh = getLocalCheckpoint();
// this will also cause version map ram to be freed hence we always account for it.
final long bytes = indexWriter.ramBytesUsed() + versionMap.ramBytesUsedForRefresh();
@ -1464,6 +1463,7 @@ public class InternalEngine extends Engine {
} finally {
store.decRef();
}
lastRefreshedCheckpointListener.updateRefreshedCheckpoint(localCheckpointBeforeRefresh);
}
} catch (AlreadyClosedException e) {
failOnTragicEvent(e);
@ -1478,7 +1478,8 @@ public class InternalEngine extends Engine {
} finally {
writingBytes.addAndGet(-bytes);
}
assert lastRefreshedCheckpoint() >= localCheckpointBeforeRefresh : "refresh checkpoint was not advanced; " +
"local_checkpoint=" + localCheckpointBeforeRefresh + " refresh_checkpoint=" + lastRefreshedCheckpoint();
// TODO: maybe we should just put a scheduled job in threadPool?
// We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes
// for a long time:
@ -2084,9 +2085,9 @@ public class InternalEngine extends Engine {
// background merges
MergePolicy mergePolicy = config().getMergePolicy();
if (softDeleteEnabled) {
iwc.setSoftDeletesField(Lucene.SOFT_DELETE_FIELD);
iwc.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD);
mergePolicy = new RecoverySourcePruneMergePolicy(SourceFieldMapper.RECOVERY_SOURCE_NAME, softDeletesPolicy::getRetentionQuery,
new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETE_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy));
new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, softDeletesPolicy::getRetentionQuery, mergePolicy));
}
iwc.setMergePolicy(new ElasticsearchMergePolicy(mergePolicy));
iwc.setSimilarity(engineConfig.getSimilarity());
@ -2439,19 +2440,26 @@ public class InternalEngine extends Engine {
}
@Override
public Translog.Snapshot newLuceneChangesSnapshot(String source, MapperService mapperService,
long minSeqNo, long maxSeqNo, boolean requiredFullRange) throws IOException {
public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService,
long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
// TODO: Should we defer the refresh until we really need it?
ensureOpen();
if (lastRefreshedCheckpoint() < maxSeqNo) {
if (lastRefreshedCheckpoint() < toSeqNo) {
refresh(source, SearcherScope.INTERNAL);
}
Searcher searcher = acquireSearcher(source, SearcherScope.INTERNAL);
try {
LuceneChangesSnapshot snapshot = new LuceneChangesSnapshot(
searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, minSeqNo, maxSeqNo, requiredFullRange);
searcher, mapperService, LuceneChangesSnapshot.DEFAULT_BATCH_SIZE, fromSeqNo, toSeqNo, requiredFullRange);
searcher = null;
return snapshot;
} catch (Exception e) {
try {
maybeFailEngine("acquire changes snapshot", e);
} catch (Exception inner) {
e.addSuppressed(inner);
}
throw e;
} finally {
IOUtils.close(searcher);
}
@ -2487,9 +2495,11 @@ public class InternalEngine extends Engine {
@Override
public Closeable acquireRetentionLockForPeerRecovery() {
final Closeable translogLock = translog.acquireRetentionLock();
final Releasable softDeletesLock = softDeletesPolicy.acquireRetentionLock();
return () -> IOUtils.close(translogLock, softDeletesLock);
if (softDeleteEnabled) {
return softDeletesPolicy.acquireRetentionLock();
} else {
return translog.acquireRetentionLock();
}
}
@Override
@ -2545,21 +2555,31 @@ public class InternalEngine extends Engine {
final long lastRefreshedCheckpoint() {
return lastRefreshedCheckpointListener.refreshedCheckpoint.get();
}
private final class LastRefreshedCheckpointListener implements ReferenceManager.RefreshListener {
final AtomicLong refreshedCheckpoint;
private long pendingCheckpoint;
LastRefreshedCheckpointListener(long initialLocalCheckpoint) {
this.refreshedCheckpoint = new AtomicLong(initialLocalCheckpoint);
}
@Override
public void beforeRefresh() {
pendingCheckpoint = localCheckpointTracker.getCheckpoint(); // All change until this point should be visible after refresh
// all changes until this point should be visible after refresh
pendingCheckpoint = localCheckpointTracker.getCheckpoint();
}
@Override
public void afterRefresh(boolean didRefresh) {
if (didRefresh) {
refreshedCheckpoint.set(pendingCheckpoint);
updateRefreshedCheckpoint(pendingCheckpoint);
}
}
void updateRefreshedCheckpoint(long checkpoint) {
refreshedCheckpoint.updateAndGet(curr -> Math.max(curr, checkpoint));
assert refreshedCheckpoint.get() >= checkpoint : refreshedCheckpoint.get() + " < " + checkpoint;
}
}
}
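Editor's note: updateRefreshedCheckpoint lets the refresh path and external callers race safely because the stored checkpoint only ever moves forward. The idiom in isolation:

import java.util.concurrent.atomic.AtomicLong;

final class MonotonicCheckpoint {
    private final AtomicLong refreshedCheckpoint = new AtomicLong(-1);

    // racing callers are fine: updateAndGet retries until the max wins
    void update(long checkpoint) {
        refreshedCheckpoint.updateAndGet(curr -> Math.max(curr, checkpoint));
        assert refreshedCheckpoint.get() >= checkpoint;
    }
}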

View File

@ -29,7 +29,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedNumericSortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.ArrayUtil;
import org.elasticsearch.common.bytes.BytesReference;
@ -86,8 +85,8 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
if (fromSeqNo < 0 || toSeqNo < 0 || fromSeqNo > toSeqNo) {
throw new IllegalArgumentException("Invalid range; from_seqno [" + fromSeqNo + "], to_seqno [" + toSeqNo + "]");
}
if (searchBatchSize < 0) {
throw new IllegalArgumentException("Search_batch_size must not be negative [" + searchBatchSize + "]");
if (searchBatchSize <= 0) {
throw new IllegalArgumentException("Search_batch_size must be positive [" + searchBatchSize + "]");
}
final AtomicBoolean closed = new AtomicBoolean();
this.onClose = () -> {
@ -213,10 +212,10 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
}
private TopDocs searchOperations(ScoreDoc after) throws IOException {
final Query rangeQuery = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, fromSeqNo, toSeqNo);
final Query rangeQuery = LongPoint.newRangeQuery(SeqNoFieldMapper.NAME, lastSeenSeqNo + 1, toSeqNo);
final Sort sortedBySeqNoThenByTerm = new Sort(
new SortedNumericSortField(SeqNoFieldMapper.NAME, SortField.Type.LONG),
new SortedNumericSortField(SeqNoFieldMapper.PRIMARY_TERM_NAME, SortField.Type.LONG, true)
new SortField(SeqNoFieldMapper.NAME, SortField.Type.LONG),
new SortField(SeqNoFieldMapper.PRIMARY_TERM_NAME, SortField.Type.LONG, true)
);
return indexSearcher.searchAfter(after, rangeQuery, searchBatchSize, sortedBySeqNoThenByTerm);
}
@ -281,9 +280,9 @@ final class LuceneChangesSnapshot implements Translog.Snapshot {
}
private boolean assertDocSoftDeleted(LeafReader leafReader, int segmentDocId) throws IOException {
final NumericDocValues ndv = leafReader.getNumericDocValues(Lucene.SOFT_DELETE_FIELD);
final NumericDocValues ndv = leafReader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD);
if (ndv == null || ndv.advanceExact(segmentDocId) == false) {
throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETE_FIELD + "] is not found");
throw new IllegalStateException("DocValues for field [" + Lucene.SOFT_DELETES_FIELD + "] is not found");
}
return ndv.longValue() == 1;
}
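Editor's note: the fix above narrows each batch's range query to start after the last seqno already returned instead of re-spanning the whole range, and sorts with plain SortField since _seq_no is single-valued per document. The paging pattern in isolation (field name passed in; Lucene APIs as of 7.x):

import java.io.IOException;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;

final class SeqNoPager {
    static TopDocs nextBatch(IndexSearcher searcher, String seqNoField,
                             long lastSeenSeqNo, long toSeqNo,
                             ScoreDoc after, int batchSize) throws IOException {
        // each batch only queries seqnos we have not returned yet
        Query range = LongPoint.newRangeQuery(seqNoField, lastSeenSeqNo + 1, toSeqNo);
        Sort bySeqNo = new Sort(new SortField(seqNoField, SortField.Type.LONG));
        return searcher.searchAfter(after, range, batchSize, bySeqNo);
    }
}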

View File

@ -72,7 +72,7 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
builder.add(retainSourceQuerySupplier.get(), BooleanClause.Occur.FILTER);
IndexSearcher s = new IndexSearcher(reader);
s.setQueryCache(null);
Weight weight = s.createWeight(builder.build(), false, 1.0f);
Weight weight = s.createWeight(s.rewrite(builder.build()), false, 1.0f);
Scorer scorer = weight.scorer(reader.getContext());
if (scorer != null) {
return new SourcePruningFilterCodecReader(recoverySourceField, reader, BitSet.of(scorer.iterator(), reader.maxDoc()));

View File

@ -1659,19 +1659,18 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
/**
* Creates a new "translog" snapshot from Lucene for reading operations whose seqno is between minSeqNo and maxSeqNo.
* The caller has to close the returned snapshot after finishing the reading.
* Creates a new changes snapshot for reading operations whose seq_no are between {@code fromSeqNo}(inclusive)
* and {@code toSeqNo}(inclusive). The caller has to close the returned snapshot after finishing the reading.
*
* @param source the source of the request
* @param minSeqNo the min_seqno to read - inclusive
* @param maxSeqNo the max_seqno to read - inclusive
* @param requiredFullRange if true then {@link Translog.Snapshot#next()} will throw {@link IllegalStateException}
* if any operation between minSeqNo and maxSeqNo is missing. This parameter should be only
* enabled when the requesting range is below the global checkpoint.
* @param fromSeqNo the from seq_no (inclusive) to read
* @param toSeqNo the to seq_no (inclusive) to read
* @param requiredFullRange if {@code true} then {@link Translog.Snapshot#next()} will throw {@link IllegalStateException}
* if any operation between {@code fromSeqNo} and {@code toSeqNo} is missing.
* This parameter should be only enabled when the entire requesting range is below the global checkpoint.
*/
public Translog.Snapshot newLuceneChangesSnapshot(String source, long minSeqNo, long maxSeqNo,
boolean requiredFullRange) throws IOException {
return getEngine().newLuceneChangesSnapshot(source, mapperService, minSeqNo, maxSeqNo, requiredFullRange);
public Translog.Snapshot newChangesSnapshot(String source, long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
return getEngine().newChangesSnapshot(source, mapperService, fromSeqNo, toSeqNo, requiredFullRange);
}
public List<Segment> segments(boolean verbose) {

View File

@ -156,7 +156,7 @@ final class StoreRecovery {
final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target);
IndexWriterConfig iwc = new IndexWriterConfig(null)
.setSoftDeletesField(Lucene.SOFT_DELETE_FIELD)
.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setCommitOnClose(false)
// we don't want merges to happen here - we call maybe merge on the engine
// later once we started it up, otherwise we would need to wait for it here

View File

@ -1594,7 +1594,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
throws IOException {
assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit";
IndexWriterConfig iwc = new IndexWriterConfig(null)
.setSoftDeletesField(Lucene.SOFT_DELETE_FIELD)
.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setCommitOnClose(false)
.setIndexCommit(commit)
// we don't want merges to happen here - we call maybe merge on the engine

View File

@ -194,7 +194,6 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
Translog.Operation prvOp = Translog.readOperation(
new BufferedChecksumStreamInput(previous.v1().streamInput(), "assertion"));
// TODO: We haven't had timestamp for Index operations in Lucene yet, we need to loosen this check without timestamp.
// We don't store versionType in Lucene index, we need to exclude it from this check
final boolean sameOp;
if (newOp instanceof Translog.Index && prvOp instanceof Translog.Index) {
final Translog.Index o1 = (Translog.Index) prvOp;

View File

@ -178,7 +178,7 @@ public class TruncateTranslogCommand extends EnvironmentAwareCommand {
terminal.println("Marking index with the new history uuid");
// commit the new history id
IndexWriterConfig iwc = new IndexWriterConfig(null)
.setSoftDeletesField(Lucene.SOFT_DELETE_FIELD)
.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setCommitOnClose(false)
// we don't want merges to happen here - we call maybe merge on the engine
// later once we started it up, otherwise we would need to wait for it here

View File

@ -0,0 +1,381 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.elasticsearch.script.IngestConditionalScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
public class ConditionalProcessor extends AbstractProcessor {
static final String TYPE = "conditional";
private final Script condition;
private final ScriptService scriptService;
private final Processor processor;
ConditionalProcessor(String tag, Script script, ScriptService scriptService, Processor processor) {
super(tag);
this.condition = script;
this.scriptService = scriptService;
this.processor = processor;
}
@Override
public void execute(IngestDocument ingestDocument) throws Exception {
IngestConditionalScript script =
scriptService.compile(condition, IngestConditionalScript.CONTEXT).newInstance(condition.getParams());
if (script.execute(new UnmodifiableIngestData(ingestDocument.getSourceAndMetadata()))) {
processor.execute(ingestDocument);
}
}
@Override
public String getType() {
return TYPE;
}
private static Object wrapUnmodifiable(Object raw) {
// Wraps all mutable types that the JSON parser can create in immutable wrappers.
// Any inputs not wrapped are assumed to be immutable
if (raw instanceof Map) {
return new UnmodifiableIngestData((Map<String, Object>) raw);
} else if (raw instanceof List) {
return new UnmodifiableIngestList((List<Object>) raw);
} else if (raw instanceof byte[]) {
return ((byte[]) raw).clone();
}
return raw;
}
private static UnsupportedOperationException unmodifiableException() {
return new UnsupportedOperationException("Mutating ingest documents in conditionals is not supported");
}
private static final class UnmodifiableIngestData implements Map<String, Object> {
private final Map<String, Object> data;
UnmodifiableIngestData(Map<String, Object> data) {
this.data = data;
}
@Override
public int size() {
return data.size();
}
@Override
public boolean isEmpty() {
return data.isEmpty();
}
@Override
public boolean containsKey(final Object key) {
return data.containsKey(key);
}
@Override
public boolean containsValue(final Object value) {
return data.containsValue(value);
}
@Override
public Object get(final Object key) {
return wrapUnmodifiable(data.get(key));
}
@Override
public Object put(final String key, final Object value) {
throw unmodifiableException();
}
@Override
public Object remove(final Object key) {
throw unmodifiableException();
}
@Override
public void putAll(final Map<? extends String, ?> m) {
throw unmodifiableException();
}
@Override
public void clear() {
throw unmodifiableException();
}
@Override
public Set<String> keySet() {
return Collections.unmodifiableSet(data.keySet());
}
@Override
public Collection<Object> values() {
return new UnmodifiableIngestList(new ArrayList<>(data.values()));
}
@Override
public Set<Entry<String, Object>> entrySet() {
return data.entrySet().stream().map(entry ->
new Entry<String, Object>() {
@Override
public String getKey() {
return entry.getKey();
}
@Override
public Object getValue() {
return wrapUnmodifiable(entry.getValue());
}
@Override
public Object setValue(final Object value) {
throw unmodifiableException();
}
@Override
public boolean equals(final Object o) {
return entry.equals(o);
}
@Override
public int hashCode() {
return entry.hashCode();
}
}).collect(Collectors.toSet());
}
}
private static final class UnmodifiableIngestList implements List<Object> {
private final List<Object> data;
UnmodifiableIngestList(List<Object> data) {
this.data = data;
}
@Override
public int size() {
return data.size();
}
@Override
public boolean isEmpty() {
return data.isEmpty();
}
@Override
public boolean contains(final Object o) {
return data.contains(o);
}
@Override
public Iterator<Object> iterator() {
Iterator<Object> wrapped = data.iterator();
return new Iterator<Object>() {
@Override
public boolean hasNext() {
return wrapped.hasNext();
}
@Override
public Object next() {
return wrapped.next();
}
@Override
public void remove() {
throw unmodifiableException();
}
};
}
@Override
public Object[] toArray() {
Object[] wrapped = data.toArray(new Object[0]);
for (int i = 0; i < wrapped.length; i++) {
wrapped[i] = wrapUnmodifiable(wrapped[i]);
}
return wrapped;
}
@Override
public <T> T[] toArray(final T[] a) {
Object[] raw = data.toArray(new Object[0]);
T[] wrapped = (T[]) Arrays.copyOf(raw, raw.length, a.getClass()); // size from the data, per the List#toArray contract
for (int i = 0; i < wrapped.length; i++) {
wrapped[i] = (T) wrapUnmodifiable(wrapped[i]);
}
return wrapped;
}
@Override
public boolean add(final Object o) {
throw unmodifiableException();
}
@Override
public boolean remove(final Object o) {
throw unmodifiableException();
}
@Override
public boolean containsAll(final Collection<?> c) {
return data.containsAll(c);
}
@Override
public boolean addAll(final Collection<?> c) {
throw unmodifiableException();
}
@Override
public boolean addAll(final int index, final Collection<?> c) {
throw unmodifiableException();
}
@Override
public boolean removeAll(final Collection<?> c) {
throw unmodifiableException();
}
@Override
public boolean retainAll(final Collection<?> c) {
throw unmodifiableException();
}
@Override
public void clear() {
throw unmodifiableException();
}
@Override
public Object get(final int index) {
return wrapUnmodifiable(data.get(index));
}
@Override
public Object set(final int index, final Object element) {
throw unmodifiableException();
}
@Override
public void add(final int index, final Object element) {
throw unmodifiableException();
}
@Override
public Object remove(final int index) {
throw unmodifiableException();
}
@Override
public int indexOf(final Object o) {
return data.indexOf(o);
}
@Override
public int lastIndexOf(final Object o) {
return data.lastIndexOf(o);
}
@Override
public ListIterator<Object> listIterator() {
return new UnmodifiableListIterator(data.listIterator());
}
@Override
public ListIterator<Object> listIterator(final int index) {
return new UnmodifiableListIterator(data.listIterator(index));
}
@Override
public List<Object> subList(final int fromIndex, final int toIndex) {
return new UnmodifiableIngestList(data.subList(fromIndex, toIndex));
}
private static final class UnmodifiableListIterator implements ListIterator<Object> {
private final ListIterator<Object> data;
UnmodifiableListIterator(ListIterator<Object> data) {
this.data = data;
}
@Override
public boolean hasNext() {
return data.hasNext();
}
@Override
public Object next() {
return wrapUnmodifiable(data.next());
}
@Override
public boolean hasPrevious() {
return data.hasPrevious();
}
@Override
public Object previous() {
return wrapUnmodifiable(data.previous());
}
@Override
public int nextIndex() {
return data.nextIndex();
}
@Override
public int previousIndex() {
return data.previousIndex();
}
@Override
public void remove() {
throw unmodifiableException();
}
@Override
public void set(final Object o) {
throw unmodifiableException();
}
@Override
public void add(final Object o) {
throw unmodifiableException();
}
}
}
}
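Editor's note: UnmodifiableIngestData guarantees that a conditional's script can read but never mutate the document. A simplified stand-in demo (Collections.unmodifiableMap does not recursively wrap nested values the way the real class does):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

final class ConditionalImmutabilityDemo {
    public static void main(String[] args) {
        Map<String, Object> source = new HashMap<>();
        source.put("status", 200);
        // simplified stand-in for UnmodifiableIngestData
        Map<String, Object> ctx = Collections.unmodifiableMap(source);
        System.out.println(ctx.get("status")); // reads succeed
        try {
            ctx.put("status", 500);            // any mutation fails fast
        } catch (UnsupportedOperationException e) {
            System.out.println("mutating ingest documents in conditionals is not supported");
        }
    }
}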

View File

@ -19,9 +19,18 @@
package org.elasticsearch.ingest;
import java.io.IOException;
import java.io.InputStream;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
@ -296,6 +305,7 @@ public final class ConfigurationUtils {
}
public static List<Processor> readProcessorConfigs(List<Map<String, Object>> processorConfigs,
ScriptService scriptService,
Map<String, Processor.Factory> processorFactories) throws Exception {
Exception exception = null;
List<Processor> processors = new ArrayList<>();
@ -303,7 +313,7 @@ public final class ConfigurationUtils {
for (Map<String, Object> processorConfigWithKey : processorConfigs) {
for (Map.Entry<String, Object> entry : processorConfigWithKey.entrySet()) {
try {
processors.add(readProcessor(processorFactories, entry.getKey(), entry.getValue()));
processors.add(readProcessor(processorFactories, scriptService, entry.getKey(), entry.getValue()));
} catch (Exception e) {
exception = ExceptionsHelper.useOrSuppress(exception, e);
}
@ -356,13 +366,14 @@ public final class ConfigurationUtils {
@SuppressWarnings("unchecked")
public static Processor readProcessor(Map<String, Processor.Factory> processorFactories,
ScriptService scriptService,
String type, Object config) throws Exception {
if (config instanceof Map) {
return readProcessor(processorFactories, type, (Map<String, Object>) config);
return readProcessor(processorFactories, scriptService, type, (Map<String, Object>) config);
} else if (config instanceof String && "script".equals(type)) {
Map<String, Object> normalizedScript = new HashMap<>(1);
normalizedScript.put(ScriptType.INLINE.getParseField().getPreferredName(), config);
return readProcessor(processorFactories, type, normalizedScript);
return readProcessor(processorFactories, scriptService, type, normalizedScript);
} else {
throw newConfigurationException(type, null, null,
"property isn't a map, but of type [" + config.getClass().getName() + "]");
@ -370,15 +381,17 @@ public final class ConfigurationUtils {
}
public static Processor readProcessor(Map<String, Processor.Factory> processorFactories,
ScriptService scriptService,
String type, Map<String, Object> config) throws Exception {
String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY);
Script conditionalScript = extractConditional(config);
Processor.Factory factory = processorFactories.get(type);
if (factory != null) {
boolean ignoreFailure = ConfigurationUtils.readBooleanProperty(null, null, config, "ignore_failure", false);
List<Map<String, Object>> onFailureProcessorConfigs =
ConfigurationUtils.readOptionalList(null, null, config, Pipeline.ON_FAILURE_KEY);
List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorFactories);
List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, scriptService, processorFactories);
if (onFailureProcessorConfigs != null && onFailureProcessors.isEmpty()) {
throw newConfigurationException(type, tag, Pipeline.ON_FAILURE_KEY,
@ -392,14 +405,42 @@ public final class ConfigurationUtils {
type, Arrays.toString(config.keySet().toArray()));
}
if (onFailureProcessors.size() > 0 || ignoreFailure) {
return new CompoundProcessor(ignoreFailure, Collections.singletonList(processor), onFailureProcessors);
} else {
return processor;
processor = new CompoundProcessor(ignoreFailure, Collections.singletonList(processor), onFailureProcessors);
}
if (conditionalScript != null) {
processor = new ConditionalProcessor(tag, conditionalScript, scriptService, processor);
}
return processor;
} catch (Exception e) {
throw newConfigurationException(type, tag, null, e);
}
}
throw newConfigurationException(type, tag, null, "No processor type exists with name [" + type + "]");
}
private static Script extractConditional(Map<String, Object> config) throws IOException {
Object scriptSource = config.remove("if");
if (scriptSource != null) {
try (XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent)
.map(normalizeScript(scriptSource));
InputStream stream = BytesReference.bytes(builder).streamInput();
XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY,
LoggingDeprecationHandler.INSTANCE, stream)) {
return Script.parse(parser);
}
}
return null;
}
@SuppressWarnings("unchecked")
private static Map<String, Object> normalizeScript(Object scriptConfig) {
if (scriptConfig instanceof Map<?, ?>) {
return (Map<String, Object>) scriptConfig;
} else if (scriptConfig instanceof String) {
return Collections.singletonMap("source", scriptConfig);
} else {
throw newConfigurationException("conditional", null, "script",
"property isn't a map or string, but of type [" + scriptConfig.getClass().getName() + "]");
}
}
}
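Editor's note: extractConditional pulls the "if" key out of the processor config, and normalizeScript accepts either a bare source string or a full script object. A sketch of both shapes as config maps (the processor option names are hypothetical):

import java.util.HashMap;
import java.util.Map;

final class ConditionalConfigShapes {
    // short form: a bare string becomes {"source": "..."} before Script.parse
    static Map<String, Object> shortForm() {
        Map<String, Object> config = new HashMap<>();
        config.put("field", "error_message");   // hypothetical processor option
        config.put("if", "ctx.status >= 400");
        return config;
    }

    // long form: a full script object is passed through to Script.parse as-is
    static Map<String, Object> longForm() {
        Map<String, Object> script = new HashMap<>();
        script.put("lang", "painless");
        script.put("source", "ctx.status >= 400");
        Map<String, Object> config = new HashMap<>();
        config.put("field", "error_message");
        config.put("if", script);
        return config;
    }
}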

View File

@ -71,6 +71,7 @@ public class IngestService implements ClusterStateApplier {
public static final String NOOP_PIPELINE_NAME = "_none";
private final ClusterService clusterService;
private final ScriptService scriptService;
private final Map<String, Processor.Factory> processorFactories;
// Ideally this should be in IngestMetadata class, but we don't have the processor factories around there.
// We know of all the processor factories when a node with all its plugin have been initialized. Also some
@ -85,6 +86,7 @@ public class IngestService implements ClusterStateApplier {
Environment env, ScriptService scriptService, AnalysisRegistry analysisRegistry,
List<IngestPlugin> ingestPlugins) {
this.clusterService = clusterService;
this.scriptService = scriptService;
this.processorFactories = processorFactories(
ingestPlugins,
new Processor.Parameters(
@ -116,6 +118,10 @@ public class IngestService implements ClusterStateApplier {
return clusterService;
}
public ScriptService getScriptService() {
return scriptService;
}
/**
* Deletes the pipeline specified by id in the request.
*/
@ -300,11 +306,12 @@ public class IngestService implements ClusterStateApplier {
}
Map<String, Object> pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2();
Pipeline pipeline = Pipeline.create(request.getId(), pipelineConfig, processorFactories);
Pipeline pipeline = Pipeline.create(request.getId(), pipelineConfig, processorFactories, scriptService);
List<Exception> exceptions = new ArrayList<>();
for (Processor processor : pipeline.flattenAllProcessors()) {
for (Map.Entry<DiscoveryNode, IngestInfo> entry : ingestInfos.entrySet()) {
if (entry.getValue().containsProcessor(processor.getType()) == false) {
String type = processor.getType();
if (entry.getValue().containsProcessor(type) == false && ConditionalProcessor.TYPE.equals(type) == false) {
String message = "Processor type [" + processor.getType() + "] is not installed on node [" + entry.getKey() + "]";
exceptions.add(
ConfigurationUtils.newConfigurationException(processor.getType(), processor.getTag(), null, message)
@ -452,7 +459,10 @@ public class IngestService implements ClusterStateApplier {
List<ElasticsearchParseException> exceptions = new ArrayList<>();
for (PipelineConfiguration pipeline : ingestMetadata.getPipelines().values()) {
try {
pipelines.put(pipeline.getId(), Pipeline.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories));
pipelines.put(
pipeline.getId(),
Pipeline.create(pipeline.getId(), pipeline.getConfigAsMap(), processorFactories, scriptService)
);
} catch (ElasticsearchParseException e) {
pipelines.put(pipeline.getId(), substitutePipeline(pipeline.getId(), e));
exceptions.add(e);

View File

@ -26,6 +26,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.elasticsearch.script.ScriptService;
/**
* A pipeline is a list of {@link Processor} instances grouped under a unique id.
@ -52,14 +53,15 @@ public final class Pipeline {
}
public static Pipeline create(String id, Map<String, Object> config,
Map<String, Processor.Factory> processorFactories) throws Exception {
Map<String, Processor.Factory> processorFactories, ScriptService scriptService) throws Exception {
String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY);
Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null);
List<Map<String, Object>> processorConfigs = ConfigurationUtils.readList(null, null, config, PROCESSORS_KEY);
List<Processor> processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, processorFactories);
List<Processor> processors = ConfigurationUtils.readProcessorConfigs(processorConfigs, scriptService, processorFactories);
List<Map<String, Object>> onFailureProcessorConfigs =
ConfigurationUtils.readOptionalList(null, null, config, ON_FAILURE_KEY);
List<Processor> onFailureProcessors = ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, processorFactories);
List<Processor> onFailureProcessors =
ConfigurationUtils.readProcessorConfigs(onFailureProcessorConfigs, scriptService, processorFactories);
if (config.isEmpty() == false) {
throw new ElasticsearchParseException("pipeline [" + id +
"] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));

View File

@ -470,7 +470,7 @@ public class Node implements Closeable {
final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService, namedWriteableRegistry,
networkService, clusterService.getMasterService(), clusterService.getClusterApplierService(),
clusterService.getClusterSettings(), pluginsService.filterPlugins(DiscoveryPlugin.class),
clusterModule.getAllocationService());
clusterModule.getAllocationService(), environment.configFile());
this.nodeService = new NodeService(settings, threadPool, monitorService, discoveryModule.getDiscovery(),
transportService, indicesService, pluginsService, circuitBreakerService, scriptModule.getScriptService(),
httpServerTransport, ingestService, clusterService, settingsModule.getSettingsFilter(), responseCollectorService,

View File

@ -1492,7 +1492,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty
// shard anyway, we just create the empty shard here and then exit.
IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null)
.setSoftDeletesField(Lucene.SOFT_DELETE_FIELD)
.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
.setCommitOnClose(true));
writer.close();

View File

@ -87,13 +87,19 @@ public class RestClusterGetSettingsAction extends BaseRestHandler {
private XContentBuilder renderResponse(ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params)
throws IOException {
return
new ClusterGetSettingsResponse(
state.metaData().persistentSettings(),
state.metaData().transientSettings(),
renderDefaults ?
settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), this.settings)) :
Settings.EMPTY
).toXContent(builder, params);
return response(state, renderDefaults, settingsFilter, clusterSettings, settings).toXContent(builder, params);
}
static ClusterGetSettingsResponse response(
final ClusterState state,
final boolean renderDefaults,
final SettingsFilter settingsFilter,
final ClusterSettings clusterSettings,
final Settings settings) {
return new ClusterGetSettingsResponse(
settingsFilter.filter(state.metaData().persistentSettings()),
settingsFilter.filter(state.metaData().transientSettings()),
renderDefaults ? settingsFilter.filter(clusterSettings.diff(state.metaData().settings(), settings)) : Settings.EMPTY);
}
}
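
Beyond making the logic unit-testable, the extraction changes behavior: persistent and transient settings now pass through the SettingsFilter as well, where previously only the rendered defaults were filtered. A sketch of calling the extracted helper directly, with the collaborators as wired in the handler:

ClusterGetSettingsResponse filtered = RestClusterGetSettingsAction.response(
        state, /* renderDefaults */ false, settingsFilter, clusterSettings, settings);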

View File

@ -0,0 +1,51 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.script;
import java.util.Map;
/**
* A script used by {@link org.elasticsearch.ingest.ConditionalProcessor}.
*/
public abstract class IngestConditionalScript {
public static final String[] PARAMETERS = { "ctx" };
/** The context used to compile {@link IngestConditionalScript} factories. */
public static final ScriptContext<Factory> CONTEXT = new ScriptContext<>("processor_conditional", Factory.class);
/** The generic runtime parameters for the script. */
private final Map<String, Object> params;
public IngestConditionalScript(Map<String, Object> params) {
this.params = params;
}
/** Return the parameters for this script. */
public Map<String, Object> getParams() {
return params;
}
public abstract boolean execute(Map<String, Object> ctx);
public interface Factory {
IngestConditionalScript newInstance(Map<String, Object> params);
}
}
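
Like the other script contexts, a conditional is compiled to a factory and instantiated per evaluation. A minimal sketch, assuming the usual ScriptService#compile entry point and an illustrative inline condition:

Script script = new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG,
        "ctx.containsKey('error')", Collections.emptyMap());
IngestConditionalScript.Factory factory =
        scriptService.compile(script, IngestConditionalScript.CONTEXT);
IngestConditionalScript conditional = factory.newInstance(script.getParams());
// "ctx" is the document's source and metadata; true means the wrapped processor runs
boolean shouldRun = conditional.execute(ingestDocument.getSourceAndMetadata());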

View File

@ -51,6 +51,7 @@ public class ScriptModule {
BucketAggregationSelectorScript.CONTEXT,
SignificantTermsHeuristicScoreScript.CONTEXT,
IngestScript.CONTEXT,
IngestConditionalScript.CONTEXT,
FilterScript.CONTEXT,
SimilarityScript.CONTEXT,
SimilarityWeightScript.CONTEXT,

View File

@ -57,6 +57,7 @@ public final class FetchSourceSubPhase implements FetchSubPhase {
if (nestedHit) {
value = getNestedSource((Map<String, Object>) value, hitContext);
}
try {
final int initialCapacity = nestedHit ? 1024 : Math.min(1024, source.internalSourceRef().length());
BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity);
@ -81,6 +82,9 @@ public final class FetchSourceSubPhase implements FetchSubPhase {
private Map<String, Object> getNestedSource(Map<String, Object> sourceAsMap, HitContext hitContext) {
for (SearchHit.NestedIdentity o = hitContext.hit().getNestedIdentity(); o != null; o = o.getChild()) {
sourceAsMap = (Map<String, Object>) sourceAsMap.get(o.getField().string());
if (sourceAsMap == null) {
return null;
}
}
return sourceAsMap;
}

View File

@ -64,6 +64,7 @@ import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.repositories.IndexId;
@ -120,7 +121,8 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp
SETTING_NUMBER_OF_SHARDS,
SETTING_VERSION_CREATED,
SETTING_INDEX_UUID,
SETTING_CREATION_DATE));
SETTING_CREATION_DATE,
IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey()));
// It's OK to change some settings, but we shouldn't allow simply removing them
private static final Set<String> UNREMOVABLE_SETTINGS;
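
index.soft_deletes.enabled joins the settings a restore may not override, since flipping soft deletes on a restored index would leave its Lucene history inconsistent with the setting. A hedged sketch of a restore request that should now be rejected (repository and snapshot names are illustrative, and the standard RestoreSnapshotRequest API is assumed):

RestoreSnapshotRequest request = new RestoreSnapshotRequest("repo", "snap")
        .indices("test")
        .indexSettings(Settings.builder()
                .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) // unmodifiable on restore
                .build());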

View File

@ -28,11 +28,15 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength;
public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTestCase<FieldCapabilitiesResponse> {
@ -48,22 +52,46 @@ public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTe
@Override
protected FieldCapabilitiesResponse createTestInstance() {
Map<String, Map<String, FieldCapabilities>> responses = new HashMap<>();
if (randomBoolean()) {
// merged responses
Map<String, Map<String, FieldCapabilities>> responses = new HashMap<>();
String[] fields = generateRandomStringArray(5, 10, false, true);
assertNotNull(fields);
for (String field : fields) {
Map<String, FieldCapabilities> typesToCapabilities = new HashMap<>();
String[] types = generateRandomStringArray(5, 10, false, false);
assertNotNull(types);
for (String type : types) {
typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field));
}
responses.put(field, typesToCapabilities);
}
return new FieldCapabilitiesResponse(responses);
} else {
// non-merged responses
List<FieldCapabilitiesIndexResponse> responses = new ArrayList<>();
int numResponse = randomIntBetween(0, 10);
for (int i = 0; i < numResponse; i++) {
responses.add(createRandomIndexResponse());
}
return new FieldCapabilitiesResponse(responses);
}
}
private FieldCapabilitiesIndexResponse createRandomIndexResponse() {
Map<String, FieldCapabilities> responses = new HashMap<>();
String[] fields = generateRandomStringArray(5, 10, false, true);
assertNotNull(fields);
for (String field : fields) {
Map<String, FieldCapabilities> typesToCapabilities = new HashMap<>();
String[] types = generateRandomStringArray(5, 10, false, false);
assertNotNull(types);
for (String type : types) {
typesToCapabilities.put(type, FieldCapabilitiesTests.randomFieldCaps(field));
}
responses.put(field, typesToCapabilities);
responses.put(field, FieldCapabilitiesTests.randomFieldCaps(field));
}
return new FieldCapabilitiesResponse(responses);
return new FieldCapabilitiesIndexResponse(randomAsciiLettersOfLength(10), responses);
}
@Override
@ -138,6 +166,11 @@ public class FieldCapabilitiesResponseTests extends AbstractStreamableXContentTe
"}").replaceAll("\\s+", ""), generatedResponse);
}
public void testEmptyResponse() throws IOException {
FieldCapabilitiesResponse testInstance = new FieldCapabilitiesResponse();
assertSerialization(testInstance);
}
private static FieldCapabilitiesResponse createSimpleResponse() {
Map<String, FieldCapabilities> titleCapabilities = new HashMap<>();
titleCapabilities.put("text", new FieldCapabilities("title", "text", true, false));
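
The response now round-trips in two shapes, which the random test instance above exercises: merged (field name to per-type capabilities) and non-merged (a list of per-index responses). A compact sketch of constructing each, using the same constructors as the test:

Map<String, FieldCapabilities> byType =
        Collections.singletonMap("text", new FieldCapabilities("title", "text", true, false));
// merged shape: field -> type -> capabilities
FieldCapabilitiesResponse merged =
        new FieldCapabilitiesResponse(Collections.singletonMap("title", byType));
// non-merged shape: one response per index, to be merged later
FieldCapabilitiesResponse nonMerged = new FieldCapabilitiesResponse(
        Collections.singletonList(new FieldCapabilitiesIndexResponse("index-1", byType)));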

View File

@ -0,0 +1,103 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.metadata;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
public class DiffableStringMapTests extends ESTestCase {
public void testDiffableStringMapDiff() {
Map<String, String> m = new HashMap<>();
m.put("foo", "bar");
m.put("baz", "eggplant");
m.put("potato", "canon");
DiffableStringMap dsm = new DiffableStringMap(m);
Map<String, String> m2 = new HashMap<>();
m2.put("foo", "not-bar");
m2.put("newkey", "yay");
m2.put("baz", "eggplant");
DiffableStringMap dsm2 = new DiffableStringMap(m2);
Diff<DiffableStringMap> diff = dsm2.diff(dsm);
assertThat(diff, instanceOf(DiffableStringMap.DiffableStringMapDiff.class));
DiffableStringMap.DiffableStringMapDiff dsmd = (DiffableStringMap.DiffableStringMapDiff) diff;
assertThat(dsmd.getDeletes(), containsInAnyOrder("potato"));
assertThat(dsmd.getDiffs().size(), equalTo(0));
Map<String, String> upserts = new HashMap<>();
upserts.put("foo", "not-bar");
upserts.put("newkey", "yay");
assertThat(dsmd.getUpserts(), equalTo(upserts));
DiffableStringMap dsm3 = diff.apply(dsm);
assertThat(dsm3.get("foo"), equalTo("not-bar"));
assertThat(dsm3.get("newkey"), equalTo("yay"));
assertThat(dsm3.get("baz"), equalTo("eggplant"));
assertThat(dsm3.get("potato"), equalTo(null));
}
public void testRandomDiffing() {
Map<String, String> m = new HashMap<>();
m.put("1", "1");
m.put("2", "2");
m.put("3", "3");
DiffableStringMap dsm = new DiffableStringMap(m);
DiffableStringMap expected = new DiffableStringMap(m);
for (int i = 0; i < randomIntBetween(5, 50); i++) {
if (randomBoolean() && expected.size() > 1) {
expected.remove(randomFrom(expected.keySet()));
} else if (randomBoolean()) {
expected.put(randomFrom(expected.keySet()), randomAlphaOfLength(4));
} else {
expected.put(randomAlphaOfLength(2), randomAlphaOfLength(4));
}
dsm = expected.diff(dsm).apply(dsm);
}
assertThat(expected, equalTo(dsm));
}
public void testSerialization() throws IOException {
Map<String, String> m = new HashMap<>();
// Occasionally have an empty map
if (frequently()) {
m.put("foo", "bar");
m.put("baz", "eggplant");
m.put("potato", "canon");
}
DiffableStringMap dsm = new DiffableStringMap(m);
BytesStreamOutput bso = new BytesStreamOutput();
dsm.writeTo(bso);
DiffableStringMap deserialized = new DiffableStringMap(bso.bytes().streamInput());
assertThat(deserialized, equalTo(dsm));
}
}
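
The intended usage is the classic diff/apply round trip used for cluster-state publication: compute a diff against the previous map, ship only the deletes and upserts, and apply it on the receiving side. A minimal sketch mirroring the test:

Map<String, String> before = new HashMap<>();
before.put("foo", "bar");
Map<String, String> after = new HashMap<>(before);
after.put("foo", "baz");     // upsert
after.put("newkey", "yay");  // insert

DiffableStringMap previous = new DiffableStringMap(before);
DiffableStringMap current = new DiffableStringMap(after);
Diff<DiffableStringMap> diff = current.diff(previous); // carries deletes and upserts only
DiffableStringMap reconstructed = diff.apply(previous);
assert reconstructed.equals(current);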

View File

@ -56,11 +56,11 @@ import org.hamcrest.Matchers;
import org.mockito.ArgumentCaptor;
import java.io.IOException;
import java.util.Map;
import java.util.HashSet;
import java.util.Set;
import java.util.Collections;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
@ -71,13 +71,13 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.startsWith;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.anyMap;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class IndexCreationTaskTests extends ESTestCase {
@ -127,14 +127,12 @@ public class IndexCreationTaskTests extends ESTestCase {
addMatchingTemplate(builder -> builder
.putAlias(AliasMetaData.builder("alias1"))
.putMapping("mapping1", createMapping())
.putCustom("custom1", createCustom())
.settings(Settings.builder().put("key1", "value1"))
);
final ClusterState result = executeTask();
assertThat(result.metaData().index("test").getAliases(), hasKey("alias1"));
assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1"));
assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1"));
assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1"));
}
@ -142,41 +140,31 @@ public class IndexCreationTaskTests extends ESTestCase {
public void testApplyDataFromRequest() throws Exception {
setupRequestAlias(new Alias("alias1"));
setupRequestMapping("mapping1", createMapping());
setupRequestCustom("custom1", createCustom());
reqSettings.put("key1", "value1");
final ClusterState result = executeTask();
assertThat(result.metaData().index("test").getAliases(), hasKey("alias1"));
assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1"));
assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1"));
assertThat(getMappingsFromResponse(), Matchers.hasKey("mapping1"));
}
public void testRequestDataHavePriorityOverTemplateData() throws Exception {
final IndexMetaData.Custom tplCustom = createCustom();
final IndexMetaData.Custom reqCustom = createCustom();
final IndexMetaData.Custom mergedCustom = createCustom();
when(reqCustom.mergeWith(tplCustom)).thenReturn(mergedCustom);
final CompressedXContent tplMapping = createMapping("text");
final CompressedXContent reqMapping = createMapping("keyword");
addMatchingTemplate(builder -> builder
.putAlias(AliasMetaData.builder("alias1").searchRouting("fromTpl").build())
.putMapping("mapping1", tplMapping)
.putCustom("custom1", tplCustom)
.settings(Settings.builder().put("key1", "tplValue"))
);
setupRequestAlias(new Alias("alias1").searchRouting("fromReq"));
setupRequestMapping("mapping1", reqMapping);
setupRequestCustom("custom1", reqCustom);
reqSettings.put("key1", "reqValue");
final ClusterState result = executeTask();
assertThat(result.metaData().index("test").getCustoms().get("custom1"), equalTo(mergedCustom));
assertThat(result.metaData().index("test").getAliases().get("alias1").getSearchRouting(), equalTo("fromReq"));
assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("reqValue"));
assertThat(getMappingsFromResponse().get("mapping1").toString(), equalTo("{type={properties={field={type=keyword}}}}"));
@ -272,14 +260,13 @@ public class IndexCreationTaskTests extends ESTestCase {
addMatchingTemplate(builder -> builder
.putAlias(AliasMetaData.builder("alias1").searchRouting("fromTpl").build())
.putMapping("mapping1", createMapping())
.putCustom("custom1", createCustom())
.settings(Settings.builder().put("key1", "tplValue"))
);
final ClusterState result = executeTask();
assertThat(result.metaData().index("test").getAliases(), not(hasKey("alias1")));
assertThat(result.metaData().index("test").getCustoms(), not(hasKey("custom1")));
assertThat(result.metaData().index("test").getCustomData(), not(hasKey("custom1")));
assertThat(result.metaData().index("test").getSettings().keySet(), not(Matchers.contains("key1")));
assertThat(getMappingsFromResponse(), not(Matchers.hasKey("mapping1")));
}
@ -296,7 +283,6 @@ public class IndexCreationTaskTests extends ESTestCase {
Boolean writeIndex = randomBoolean() ? null : randomBoolean();
setupRequestAlias(new Alias("alias1").writeIndex(writeIndex));
setupRequestMapping("mapping1", createMapping());
setupRequestCustom("custom1", createCustom());
reqSettings.put("key1", "value1");
final ClusterState result = executeTask();
@ -310,7 +296,6 @@ public class IndexCreationTaskTests extends ESTestCase {
.numberOfShards(1).numberOfReplicas(0).build();
idxBuilder.put("test2", existingWriteIndex);
setupRequestMapping("mapping1", createMapping());
setupRequestCustom("custom1", createCustom());
reqSettings.put("key1", "value1");
setupRequestAlias(new Alias("alias1").writeIndex(true));
@ -342,8 +327,8 @@ public class IndexCreationTaskTests extends ESTestCase {
.numberOfReplicas(numReplicas);
}
private IndexMetaData.Custom createCustom() {
return mock(IndexMetaData.Custom.class);
private Map<String, String> createCustom() {
return Collections.singletonMap("a", "b");
}
private interface MetaDataBuilderConfigurator {
@ -372,10 +357,6 @@ public class IndexCreationTaskTests extends ESTestCase {
when(request.mappings()).thenReturn(Collections.singletonMap(mappingKey, mapping.string()));
}
private void setupRequestCustom(String customKey, IndexMetaData.Custom custom) throws IOException {
when(request.customs()).thenReturn(Collections.singletonMap(customKey, custom));
}
private CompressedXContent createMapping() throws IOException {
return createMapping("text");
}

View File

@ -23,7 +23,9 @@ import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition;
import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition;
import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@ -45,6 +47,8 @@ import org.junit.Before;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import static org.hamcrest.Matchers.is;
@ -71,6 +75,9 @@ public class IndexMetaDataTests extends ESTestCase {
public void testIndexMetaDataSerialization() throws IOException {
Integer numShard = randomFrom(1, 2, 4, 8, 16);
int numberOfReplicas = randomIntBetween(0, 10);
Map<String, String> customMap = new HashMap<>();
customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10));
customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15));
IndexMetaData metaData = IndexMetaData.builder("foo")
.settings(Settings.builder()
.put("index.version.created", 1)
@ -80,6 +87,7 @@ public class IndexMetaDataTests extends ESTestCase {
.creationDate(randomLong())
.primaryTerm(0, 2)
.setRoutingNumShards(32)
.putCustom("my_custom", customMap)
.putRolloverInfo(
new RolloverInfo(randomAlphaOfLength(5),
Arrays.asList(new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())),
@ -93,7 +101,8 @@ public class IndexMetaDataTests extends ESTestCase {
builder.endObject();
XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder));
final IndexMetaData fromXContentMeta = IndexMetaData.fromXContent(parser);
assertEquals(metaData, fromXContentMeta);
assertEquals("expected: " + Strings.toString(metaData) + "\nactual : " + Strings.toString(fromXContentMeta),
metaData, fromXContentMeta);
assertEquals(metaData.hashCode(), fromXContentMeta.hashCode());
assertEquals(metaData.getNumberOfReplicas(), fromXContentMeta.getNumberOfReplicas());
@ -103,6 +112,11 @@ public class IndexMetaDataTests extends ESTestCase {
assertEquals(metaData.getCreationDate(), fromXContentMeta.getCreationDate());
assertEquals(metaData.getRoutingFactor(), fromXContentMeta.getRoutingFactor());
assertEquals(metaData.primaryTerm(0), fromXContentMeta.primaryTerm(0));
ImmutableOpenMap.Builder<String, DiffableStringMap> expectedCustomBuilder = ImmutableOpenMap.builder();
expectedCustomBuilder.put("my_custom", new DiffableStringMap(customMap));
ImmutableOpenMap<String, DiffableStringMap> expectedCustom = expectedCustomBuilder.build();
assertEquals(metaData.getCustomData(), expectedCustom);
assertEquals(metaData.getCustomData(), fromXContentMeta.getCustomData());
final BytesStreamOutput out = new BytesStreamOutput();
metaData.writeTo(out);
@ -119,6 +133,8 @@ public class IndexMetaDataTests extends ESTestCase {
assertEquals(metaData.getRoutingFactor(), deserialized.getRoutingFactor());
assertEquals(metaData.primaryTerm(0), deserialized.primaryTerm(0));
assertEquals(metaData.getRolloverInfos(), deserialized.getRolloverInfos());
assertEquals(deserialized.getCustomData(), expectedCustom);
assertEquals(metaData.getCustomData(), deserialized.getCustomData());
}
}
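
Index-level customs are no longer pluggable IndexMetaData.Custom implementations but plain string-to-string maps stored as DiffableStringMap, which serialize, diff, and render to XContent uniformly. A hedged sketch of attaching and reading one via the builder (the custom key and values are illustrative):

Map<String, String> custom = new HashMap<>();
custom.put("created_by", "ccr");
IndexMetaData meta = IndexMetaData.builder("foo")
        .settings(Settings.builder().put("index.version.created", 1))
        .numberOfShards(1)
        .numberOfReplicas(0)
        .putCustom("my_custom", custom)
        .build();
// getCustomData() exposes the stored DiffableStringMap, readable as a Map
Map<String, String> stored = meta.getCustomData().get("my_custom");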

View File

@ -78,13 +78,13 @@ public class IndexTemplateMetaDataTests extends ESTestCase {
public void testValidateInvalidIndexPatterns() throws Exception {
final IllegalArgumentException emptyPatternError = expectThrows(IllegalArgumentException.class, () -> {
new IndexTemplateMetaData(randomRealisticUnicodeOfLengthBetween(5, 10), randomInt(), randomInt(),
Collections.emptyList(), Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of());
Collections.emptyList(), Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of());
});
assertThat(emptyPatternError.getMessage(), equalTo("Index patterns must not be null or empty; got []"));
final IllegalArgumentException nullPatternError = expectThrows(IllegalArgumentException.class, () -> {
new IndexTemplateMetaData(randomRealisticUnicodeOfLengthBetween(5, 10), randomInt(), randomInt(),
null, Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of());
null, Settings.EMPTY, ImmutableOpenMap.of(), ImmutableOpenMap.of());
});
assertThat(nullPatternError.getMessage(), equalTo("Index patterns must not be null or empty; got null"));

View File

@ -286,16 +286,19 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
metaBuilder.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT).put("index.uuid", "1234"))
.numberOfShards(4).numberOfReplicas(0));
metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT).put("index.uuid", "5678")
.put("index.shrink.source.name", "test").put("index.shrink.source.uuid", "1234")).numberOfShards(1).numberOfReplicas(0));
.put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, "test").put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, "1234"))
.numberOfShards(1)
.numberOfReplicas(0));
metaBuilder.put(IndexMetaData.builder("target2").settings(settings(Version.CURRENT).put("index.uuid", "9101112")
.put("index.shrink.source.name", "test").put("index.shrink.source.uuid", "1234")).numberOfShards(2).numberOfReplicas(0));
.put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY, "test").put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, "1234"))
.numberOfShards(2).numberOfReplicas(0));
MetaData metaData = metaBuilder.build();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
routingTableBuilder.addAsNew(metaData.index("test"));
routingTableBuilder.addAsNew(metaData.index("target"));
routingTableBuilder.addAsNew(metaData.index("target2"));
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData).routingTable(routingTableBuilder.build()).build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTableBuilder.build()).build();
AllocationService allocationService = createAllocationService();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")))
@ -330,7 +333,6 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
assertEquals(100L, DiskThresholdDecider.getExpectedShardSize(test_1, allocation, 0));
assertEquals(10L, DiskThresholdDecider.getExpectedShardSize(test_0, allocation, 0));
ShardRouting target = ShardRouting.newUnassigned(new ShardId(new Index("target", "5678"), 0),
true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
assertEquals(1110L, DiskThresholdDecider.getExpectedShardSize(target, allocation, 0));
@ -350,12 +352,9 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
.build();
allocationService.reroute(clusterState, "foo");
RoutingAllocation allocationWithMissingSourceIndex = new RoutingAllocation(null,
clusterStateWithMissingSourceIndex.getRoutingNodes(), clusterStateWithMissingSourceIndex, info, 0);
assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target, allocationWithMissingSourceIndex, 42L));
assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target2, allocationWithMissingSourceIndex, 42L));
}
}

View File

@ -42,8 +42,8 @@ import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.util.Arrays;
import java.util.Collections;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_SHRINK_SOURCE_NAME;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_SHRINK_SOURCE_UUID;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_RESIZE_SOURCE_NAME;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_RESIZE_SOURCE_UUID;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
@ -151,8 +151,8 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {
.putInSyncAllocationIds(1, Collections.singleton("aid1"))
.build();
metaData.put(sourceIndex, false);
indexSettings.put(INDEX_SHRINK_SOURCE_UUID.getKey(), sourceIndex.getIndexUUID());
indexSettings.put(INDEX_SHRINK_SOURCE_NAME.getKey(), sourceIndex.getIndex().getName());
indexSettings.put(INDEX_RESIZE_SOURCE_UUID.getKey(), sourceIndex.getIndexUUID());
indexSettings.put(INDEX_RESIZE_SOURCE_NAME.getKey(), sourceIndex.getIndex().getName());
} else {
sourceIndex = null;
}

View File

@ -416,8 +416,8 @@ public class LuceneTests extends ESTestCase {
public void testWrapAllDocsLive() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETE_FIELD)
.setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETE_FIELD, MatchAllDocsQuery::new, newMergePolicy()));
IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy()));
IndexWriter writer = new IndexWriter(dir, config);
int numDocs = between(1, 10);
Set<String> liveDocs = new HashSet<>();
@ -434,9 +434,9 @@ public class LuceneTests extends ESTestCase {
Document doc = new Document();
doc.add(new StringField("id", "v2-" + id, Store.YES));
if (randomBoolean()) {
doc.add(Lucene.newSoftDeleteField());
doc.add(Lucene.newSoftDeletesField());
}
writer.softUpdateDocument(new Term("id", id), doc, Lucene.newSoftDeleteField());
writer.softUpdateDocument(new Term("id", id), doc, Lucene.newSoftDeletesField());
liveDocs.add("v2-" + id);
}
}
@ -456,8 +456,8 @@ public class LuceneTests extends ESTestCase {
public void testWrapLiveDocsNotExposeAbortedDocuments() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETE_FIELD)
.setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETE_FIELD, MatchAllDocsQuery::new, newMergePolicy()));
IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy()));
IndexWriter writer = new IndexWriter(dir, config);
int numDocs = between(1, 10);
List<String> liveDocs = new ArrayList<>();
@ -466,7 +466,7 @@ public class LuceneTests extends ESTestCase {
Document doc = new Document();
doc.add(new StringField("id", id, Store.YES));
if (randomBoolean()) {
doc.add(Lucene.newSoftDeleteField());
doc.add(Lucene.newSoftDeletesField());
}
writer.addDocument(doc);
liveDocs.add(id);

View File

@ -109,6 +109,7 @@ public abstract class AbstractDisruptionTestCase extends ESIntegTestCase {
protected void beforeIndexDeletion() throws Exception {
if (disableBeforeIndexDeletion == false) {
super.beforeIndexDeletion();
internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex();
assertSeqNos();
}
}

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.discovery;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
@ -29,6 +28,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.DiscoveryPlugin;
@ -99,7 +99,7 @@ public class DiscoveryModuleTests extends ESTestCase {
private DiscoveryModule newModule(Settings settings, List<DiscoveryPlugin> plugins) {
return new DiscoveryModule(settings, threadPool, transportService, namedWriteableRegistry, null, masterService,
clusterApplier, clusterSettings, plugins, null);
clusterApplier, clusterSettings, plugins, null, createTempDir().toAbsolutePath());
}
public void testDefaults() {

View File

@ -17,7 +17,7 @@
* under the License.
*/
package org.elasticsearch.discovery.file;
package org.elasticsearch.discovery.zen;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
@ -26,9 +26,7 @@ import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.discovery.zen.UnicastZenPing;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.MockTransportService;
@ -50,16 +48,15 @@ import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import static org.elasticsearch.discovery.file.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE;
import static org.elasticsearch.discovery.zen.FileBasedUnicastHostsProvider.UNICAST_HOSTS_FILE;
/**
* Tests for {@link FileBasedUnicastHostsProvider}.
*/
public class FileBasedUnicastHostsProviderTests extends ESTestCase {
private boolean legacyLocation;
private ThreadPool threadPool;
private ExecutorService executorService;
private MockTransportService transportService;
private Path configPath;
@Before
public void setUp() throws Exception {
@ -83,23 +80,20 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase {
@Before
public void createTransportSvc() {
MockTcpTransport transport =
new MockTcpTransport(Settings.EMPTY,
threadPool,
BigArrays.NON_RECYCLING_INSTANCE,
new NoneCircuitBreakerService(),
new NamedWriteableRegistry(Collections.emptyList()),
new NetworkService(Collections.emptyList())) {
@Override
public BoundTransportAddress boundAddress() {
return new BoundTransportAddress(
new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)},
new TransportAddress(InetAddress.getLoopbackAddress(), 9300)
);
}
};
final MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
new NoneCircuitBreakerService(),
new NamedWriteableRegistry(Collections.emptyList()),
new NetworkService(Collections.emptyList())) {
@Override
public BoundTransportAddress boundAddress() {
return new BoundTransportAddress(
new TransportAddress[]{new TransportAddress(InetAddress.getLoopbackAddress(), 9300)},
new TransportAddress(InetAddress.getLoopbackAddress(), 9300)
);
}
};
transportService = new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
null);
null);
}
public void testBuildDynamicNodes() throws Exception {
@ -114,18 +108,27 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase {
assertEquals(9300, nodes.get(2).getPort());
}
public void testBuildDynamicNodesLegacyLocation() throws Exception {
legacyLocation = true;
testBuildDynamicNodes();
assertDeprecatedLocationWarning();
}
public void testEmptyUnicastHostsFile() throws Exception {
final List<String> hostEntries = Collections.emptyList();
final List<TransportAddress> addresses = setupAndRunHostProvider(hostEntries);
assertEquals(0, addresses.size());
}
public void testUnicastHostsDoesNotExist() throws Exception {
final Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.build();
final Environment environment = TestEnvironment.newEnvironment(settings);
final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment);
public void testEmptyUnicastHostsFileLegacyLocation() throws Exception {
legacyLocation = true;
testEmptyUnicastHostsFile();
assertDeprecatedLocationWarning();
}
public void testUnicastHostsDoesNotExist() {
final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build();
final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, createTempDir().toAbsolutePath());
final List<TransportAddress> addresses = provider.buildDynamicHosts((hosts, limitPortCounts) ->
UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService,
TimeValue.timeValueSeconds(10)));
@ -133,42 +136,60 @@ public class FileBasedUnicastHostsProviderTests extends ESTestCase {
}
public void testInvalidHostEntries() throws Exception {
List<String> hostEntries = Arrays.asList("192.168.0.1:9300:9300");
List<TransportAddress> addresses = setupAndRunHostProvider(hostEntries);
final List<String> hostEntries = Arrays.asList("192.168.0.1:9300:9300");
final List<TransportAddress> addresses = setupAndRunHostProvider(hostEntries);
assertEquals(0, addresses.size());
}
public void testInvalidHostEntriesLegacyLocation() throws Exception {
legacyLocation = true;
testInvalidHostEntries();
assertDeprecatedLocationWarning();
}
public void testSomeInvalidHostEntries() throws Exception {
List<String> hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301");
List<TransportAddress> addresses = setupAndRunHostProvider(hostEntries);
final List<String> hostEntries = Arrays.asList("192.168.0.1:9300:9300", "192.168.0.1:9301");
final List<TransportAddress> addresses = setupAndRunHostProvider(hostEntries);
assertEquals(1, addresses.size()); // only one of the two is valid and will be used
assertEquals("192.168.0.1", addresses.get(0).getAddress());
assertEquals(9301, addresses.get(0).getPort());
}
public void testSomeInvalidHostEntriesLegacyLocation() throws Exception {
legacyLocation = true;
testSomeInvalidHostEntries();
assertDeprecatedLocationWarning();
}
// sets up the config dir, writes to the unicast hosts file in the config dir,
// and then runs the file-based unicast host provider to get the list of discovery nodes
private List<TransportAddress> setupAndRunHostProvider(final List<String> hostEntries) throws IOException {
final Path homeDir = createTempDir();
final Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), homeDir)
.build();
final Path configPath;
.put(Environment.PATH_HOME_SETTING.getKey(), homeDir)
.build();
if (randomBoolean()) {
configPath = homeDir.resolve("config");
} else {
configPath = createTempDir();
}
final Path discoveryFilePath = configPath.resolve("discovery-file");
final Path discoveryFilePath = legacyLocation ? configPath.resolve("discovery-file") : configPath;
Files.createDirectories(discoveryFilePath);
final Path unicastHostsPath = discoveryFilePath.resolve(UNICAST_HOSTS_FILE);
try (BufferedWriter writer = Files.newBufferedWriter(unicastHostsPath)) {
writer.write(String.join("\n", hostEntries));
}
return new FileBasedUnicastHostsProvider(
new Environment(settings, configPath)).buildDynamicHosts((hosts, limitPortCounts) ->
UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService,
TimeValue.timeValueSeconds(10)));
return new FileBasedUnicastHostsProvider(settings, configPath).buildDynamicHosts((hosts, limitPortCounts) ->
UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts, transportService,
TimeValue.timeValueSeconds(10)));
}
private void assertDeprecatedLocationWarning() {
assertWarnings("Found dynamic hosts list at [" +
configPath.resolve("discovery-file").resolve(UNICAST_HOSTS_FILE) +
"] but this path is deprecated. This list should be at [" +
configPath.resolve(UNICAST_HOSTS_FILE) +
"] instead. Support for the deprecated path will be removed in future.");
}
}
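
With the discovery-file plugin folded into zen discovery, the hosts list is read straight from the config directory; the old config/discovery-file/ location still works but logs the deprecation warning asserted above. A sketch of the new construction path, reusing the resolver from the test (unicast_hosts.txt holds one host per line, e.g. 192.168.0.1 or 192.168.0.1:9301):

FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(settings, configPath);
List<TransportAddress> seeds = provider.buildDynamicHosts((hosts, limitPortCounts) ->
        UnicastZenPing.resolveHostsLists(executorService, logger, hosts, limitPortCounts,
                transportService, TimeValue.timeValueSeconds(10)));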

View File

@ -3841,7 +3841,7 @@ public class InternalEngineTests extends EngineTestCase {
maxSeqNo,
localCheckpoint);
trimUnsafeCommits(engine.config());
EngineConfig noopEngineConfig = copy(engine.config(), new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETE_FIELD,
EngineConfig noopEngineConfig = copy(engine.config(), new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD,
() -> new MatchAllDocsQuery(), engine.config().getMergePolicy()));
noOpEngine = new InternalEngine(noopEngineConfig, supplier) {
@Override
@ -4887,7 +4887,7 @@ public class InternalEngineTests extends EngineTestCase {
private void assertOperationHistoryInLucene(List<Engine.Operation> operations) throws IOException {
final MergePolicy keepSoftDeleteDocsMP = new SoftDeletesRetentionMergePolicy(
Lucene.SOFT_DELETE_FIELD, () -> new MatchAllDocsQuery(), engine.config().getMergePolicy());
Lucene.SOFT_DELETES_FIELD, () -> new MatchAllDocsQuery(), engine.config().getMergePolicy());
Settings.Builder settings = Settings.builder()
.put(defaultSettings.getSettings())
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
@ -4994,6 +4994,32 @@ public class InternalEngineTests extends EngineTestCase {
}
}
public void testLastRefreshCheckpoint() throws Exception {
AtomicBoolean done = new AtomicBoolean();
Thread[] refreshThreads = new Thread[between(1, 8)];
CountDownLatch latch = new CountDownLatch(refreshThreads.length);
for (int i = 0; i < refreshThreads.length; i++) {
latch.countDown();
refreshThreads[i] = new Thread(() -> {
while (done.get() == false) {
long checkPointBeforeRefresh = engine.getLocalCheckpoint();
engine.refresh("test", randomFrom(Engine.SearcherScope.values()));
assertThat(engine.lastRefreshedCheckpoint(), greaterThanOrEqualTo(checkPointBeforeRefresh));
}
});
refreshThreads[i].start();
}
latch.await();
List<Engine.Operation> ops = generateSingleDocHistory(true, VersionType.EXTERNAL, 1, 10, 1000, "1");
concurrentlyApplyOps(ops, engine);
done.set(true);
for (Thread thread : refreshThreads) {
thread.join();
}
engine.refresh("test");
assertThat(engine.lastRefreshedCheckpoint(), equalTo(engine.getLocalCheckpoint()));
}
private static void trimUnsafeCommits(EngineConfig config) throws IOException {
final Store store = config.getStore();
final TranslogConfig translogConfig = config.getTranslogConfig();

View File

@ -62,12 +62,12 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
long fromSeqNo = randomNonNegativeLong();
long toSeqNo = randomLongBetween(fromSeqNo, Long.MAX_VALUE);
// Empty engine
try (Translog.Snapshot snapshot = engine.newLuceneChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) {
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) {
IllegalStateException error = expectThrows(IllegalStateException.class, () -> drainAll(snapshot));
assertThat(error.getMessage(),
containsString("Not all operations between from_seqno [" + fromSeqNo + "] and to_seqno [" + toSeqNo + "] found"));
}
try (Translog.Snapshot snapshot = engine.newLuceneChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, false)) {
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, false)) {
assertThat(snapshot, SnapshotMatchers.size(0));
}
int numOps = between(1, 100);
@ -146,7 +146,7 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
// Getting a snapshot via the engine will auto-refresh
// Getting a snapshot via the engine will auto-refresh
fromSeqNo = randomLongBetween(0, numOps - 1);
toSeqNo = randomLongBetween(fromSeqNo, numOps - 1);
try (Translog.Snapshot snapshot = engine.newLuceneChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, randomBoolean())) {
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, randomBoolean())) {
assertThat(snapshot, SnapshotMatchers.containsSeqNoRange(fromSeqNo, toSeqNo));
}
}
@ -182,7 +182,7 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
}
}
long maxSeqNo = engine.getLocalCheckpointTracker().getMaxSeqNo();
try (Translog.Snapshot snapshot = engine.newLuceneChangesSnapshot("test", mapperService, 0, maxSeqNo, false)) {
try (Translog.Snapshot snapshot = engine.newChangesSnapshot("test", mapperService, 0, maxSeqNo, false)) {
Translog.Operation op;
while ((op = snapshot.next()) != null) {
assertThat(op.toString(), op.primaryTerm(), equalTo(latestOperations.get(op.seqNo())));
@ -251,7 +251,7 @@ public class LuceneChangesSnapshotTests extends EngineTestCase {
long fromSeqNo = followerCheckpoint + 1;
long batchSize = randomLongBetween(0, 100);
long toSeqNo = Math.min(fromSeqNo + batchSize, leaderCheckpoint);
try (Translog.Snapshot snapshot = leader.newLuceneChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) {
try (Translog.Snapshot snapshot = leader.newChangesSnapshot("test", mapperService, fromSeqNo, toSeqNo, true)) {
translogHandler.run(follower, snapshot);
}
}

View File

@ -92,7 +92,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
replica.close("test", false);
final List<IndexCommit> commits = DirectoryReader.listCommits(replica.store().directory());
IndexWriterConfig iwc = new IndexWriterConfig(null)
.setSoftDeletesField(Lucene.SOFT_DELETE_FIELD)
.setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
.setCommitOnClose(false)
.setMergePolicy(NoMergePolicy.INSTANCE)
.setOpenMode(IndexWriterConfig.OpenMode.APPEND);

View File

@ -0,0 +1,141 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.ingest;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.MockScriptEngine;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.core.Is.is;
public class ConditionalProcessorTests extends ESTestCase {
public void testChecksCondition() throws Exception {
String conditionalField = "field1";
String scriptName = "conditionalScript";
String trueValue = "truthy";
ScriptService scriptService = new ScriptService(Settings.builder().build(),
Collections.singletonMap(
Script.DEFAULT_SCRIPT_LANG,
new MockScriptEngine(
Script.DEFAULT_SCRIPT_LANG,
Collections.singletonMap(
scriptName, ctx -> trueValue.equals(ctx.get(conditionalField))
)
)
),
new HashMap<>(ScriptModule.CORE_CONTEXTS)
);
Map<String, Object> document = new HashMap<>();
ConditionalProcessor processor = new ConditionalProcessor(
randomAlphaOfLength(10),
new Script(
ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG,
scriptName, Collections.emptyMap()), scriptService,
new Processor() {
@Override
public void execute(final IngestDocument ingestDocument) throws Exception {
ingestDocument.setFieldValue("foo", "bar");
}
@Override
public String getType() {
return null;
}
@Override
public String getTag() {
return null;
}
});
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
ingestDocument.setFieldValue(conditionalField, trueValue);
processor.execute(ingestDocument);
assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(trueValue));
assertThat(ingestDocument.getSourceAndMetadata().get("foo"), is("bar"));
String falseValue = "falsy";
ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
ingestDocument.setFieldValue(conditionalField, falseValue);
processor.execute(ingestDocument);
assertThat(ingestDocument.getSourceAndMetadata().get(conditionalField), is(falseValue));
assertThat(ingestDocument.getSourceAndMetadata(), not(hasKey("foo")));
}
@SuppressWarnings("unchecked")
public void testActsOnImmutableData() throws Exception {
assertMutatingCtxThrows(ctx -> ctx.remove("foo"));
assertMutatingCtxThrows(ctx -> ctx.put("foo", "bar"));
assertMutatingCtxThrows(ctx -> ((List<Object>)ctx.get("listField")).add("bar"));
assertMutatingCtxThrows(ctx -> ((List<Object>)ctx.get("listField")).remove("bar"));
}
private static void assertMutatingCtxThrows(Consumer<Map<String, Object>> mutation) throws Exception {
String scriptName = "conditionalScript";
CompletableFuture<Exception> expectedException = new CompletableFuture<>();
ScriptService scriptService = new ScriptService(Settings.builder().build(),
Collections.singletonMap(
Script.DEFAULT_SCRIPT_LANG,
new MockScriptEngine(
Script.DEFAULT_SCRIPT_LANG,
Collections.singletonMap(
scriptName, ctx -> {
try {
mutation.accept(ctx);
} catch (Exception e) {
expectedException.complete(e);
}
return false;
}
)
)
),
new HashMap<>(ScriptModule.CORE_CONTEXTS)
);
Map<String, Object> document = new HashMap<>();
ConditionalProcessor processor = new ConditionalProcessor(
randomAlphaOfLength(10),
new Script(
ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG,
scriptName, Collections.emptyMap()), scriptService, null
);
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
ingestDocument.setFieldValue("listField", new ArrayList<>());
processor.execute(ingestDocument);
Exception e = expectedException.get();
assertThat(e, instanceOf(UnsupportedOperationException.class));
assertEquals("Mutating ingest documents in conditionals is not supported", e.getMessage());
}
}
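
A conditional wraps exactly one inner processor and evaluates its script against an immutable view of the document, which is what the mutation test above relies on. A hedged sketch of wiring one up programmatically (in pipeline JSON this surfaces as an "if" clause on the processor definition, an assumption here; innerProcessor is any Processor):

ConditionalProcessor conditional = new ConditionalProcessor(
        "tag-1",                                              // processor tag
        new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG,
                "conditionalScript", Collections.emptyMap()), // the condition
        scriptService,
        innerProcessor);                                      // runs only when the script returns true
conditional.execute(ingestDocument);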

View File

@ -20,6 +20,7 @@
package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;
@ -38,6 +39,9 @@ import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Mockito.mock;
public class ConfigurationUtilsTests extends ESTestCase {
private final ScriptService scriptService = mock(ScriptService.class);
private Map<String, Object> config;
@Before
@ -120,7 +124,7 @@ public class ConfigurationUtilsTests extends ESTestCase {
config.add(Collections.singletonMap("test_processor", emptyConfig));
config.add(Collections.singletonMap("test_processor", emptyConfig));
List<Processor> result = ConfigurationUtils.readProcessorConfigs(config, registry);
List<Processor> result = ConfigurationUtils.readProcessorConfigs(config, scriptService, registry);
assertThat(result.size(), equalTo(2));
assertThat(result.get(0), sameInstance(processor));
assertThat(result.get(1), sameInstance(processor));
@ -129,7 +133,7 @@ public class ConfigurationUtilsTests extends ESTestCase {
unknownTaggedConfig.put("tag", "my_unknown");
config.add(Collections.singletonMap("unknown_processor", unknownTaggedConfig));
ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class,
() -> ConfigurationUtils.readProcessorConfigs(config, registry));
() -> ConfigurationUtils.readProcessorConfigs(config, scriptService, registry));
assertThat(e.getMessage(), equalTo("No processor type exists with name [unknown_processor]"));
assertThat(e.getMetadata("es.processor_tag"), equalTo(Collections.singletonList("my_unknown")));
assertThat(e.getMetadata("es.processor_type"), equalTo(Collections.singletonList("unknown_processor")));
@ -142,7 +146,10 @@ public class ConfigurationUtilsTests extends ESTestCase {
Map<String, Object> secondUnknownTaggedConfig = new HashMap<>();
secondUnknownTaggedConfig.put("tag", "my_second_unknown");
config2.add(Collections.singletonMap("second_unknown_processor", secondUnknownTaggedConfig));
e = expectThrows(ElasticsearchParseException.class, () -> ConfigurationUtils.readProcessorConfigs(config2, registry));
e = expectThrows(
ElasticsearchParseException.class,
() -> ConfigurationUtils.readProcessorConfigs(config2, scriptService, registry)
);
assertThat(e.getMessage(), equalTo("No processor type exists with name [unknown_processor]"));
assertThat(e.getMetadata("es.processor_tag"), equalTo(Collections.singletonList("my_unknown")));
assertThat(e.getMetadata("es.processor_type"), equalTo(Collections.singletonList("unknown_processor")));
@ -166,17 +173,17 @@ public class ConfigurationUtilsTests extends ESTestCase {
});
Object emptyConfig = Collections.emptyMap();
Processor processor1 = ConfigurationUtils.readProcessor(registry, "script", emptyConfig);
Processor processor1 = ConfigurationUtils.readProcessor(registry, scriptService, "script", emptyConfig);
assertThat(processor1, sameInstance(processor));
Object inlineScript = "test_script";
Processor processor2 = ConfigurationUtils.readProcessor(registry, "script", inlineScript);
Processor processor2 = ConfigurationUtils.readProcessor(registry, scriptService, "script", inlineScript);
assertThat(processor2, sameInstance(processor));
Object invalidConfig = 12L;
ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class,
() -> ConfigurationUtils.readProcessor(registry, "unknown_processor", invalidConfig));
() -> ConfigurationUtils.readProcessor(registry, scriptService, "unknown_processor", invalidConfig));
assertThat(ex.getMessage(), equalTo("property isn't a map, but of type [" + invalidConfig.getClass().getName() + "]"));
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.ingest;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESTestCase;
import java.util.Arrays;
@ -32,11 +33,13 @@ import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.Mockito.mock;
public class PipelineFactoryTests extends ESTestCase {
private final Integer version = randomBoolean() ? randomInt() : null;
private final String versionString = version != null ? Integer.toString(version) : null;
private final ScriptService scriptService = mock(ScriptService.class);
public void testCreate() throws Exception {
Map<String, Object> processorConfig0 = new HashMap<>();
@ -48,7 +51,7 @@ public class PipelineFactoryTests extends ESTestCase {
pipelineConfig.put(Pipeline.PROCESSORS_KEY,
Arrays.asList(Collections.singletonMap("test", processorConfig0), Collections.singletonMap("test", processorConfig1)));
Map<String, Processor.Factory> processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory());
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry);
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService);
assertThat(pipeline.getId(), equalTo("_id"));
assertThat(pipeline.getDescription(), equalTo("_description"));
assertThat(pipeline.getVersion(), equalTo(version));
@ -64,7 +67,7 @@ public class PipelineFactoryTests extends ESTestCase {
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
try {
Pipeline.create("_id", pipelineConfig, Collections.emptyMap());
Pipeline.create("_id", pipelineConfig, Collections.emptyMap(), scriptService);
fail("should fail, missing required [processors] field");
} catch (ElasticsearchParseException e) {
assertThat(e.getMessage(), equalTo("[processors] required property is missing"));
@ -76,7 +79,7 @@ public class PipelineFactoryTests extends ESTestCase {
pipelineConfig.put(Pipeline.DESCRIPTION_KEY, "_description");
pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.emptyList());
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, null);
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, null, scriptService);
assertThat(pipeline.getId(), equalTo("_id"));
assertThat(pipeline.getDescription(), equalTo("_description"));
assertThat(pipeline.getVersion(), equalTo(version));
@ -91,7 +94,7 @@ public class PipelineFactoryTests extends ESTestCase {
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
Map<String, Processor.Factory> processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory());
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry);
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService);
assertThat(pipeline.getId(), equalTo("_id"));
assertThat(pipeline.getDescription(), equalTo("_description"));
assertThat(pipeline.getVersion(), equalTo(version));
@ -109,7 +112,10 @@ public class PipelineFactoryTests extends ESTestCase {
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
pipelineConfig.put(Pipeline.ON_FAILURE_KEY, Collections.emptyList());
Map<String, Processor.Factory> processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory());
Exception e = expectThrows(ElasticsearchParseException.class, () -> Pipeline.create("_id", pipelineConfig, processorRegistry));
Exception e = expectThrows(
ElasticsearchParseException.class,
() -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService)
);
assertThat(e.getMessage(), equalTo("pipeline [_id] cannot have an empty on_failure option defined"));
}
@ -121,7 +127,10 @@ public class PipelineFactoryTests extends ESTestCase {
pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
Map<String, Processor.Factory> processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory());
Exception e = expectThrows(ElasticsearchParseException.class, () -> Pipeline.create("_id", pipelineConfig, processorRegistry));
Exception e = expectThrows(
ElasticsearchParseException.class,
() -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService)
);
assertThat(e.getMessage(), equalTo("[on_failure] processors list cannot be empty"));
}
@ -136,7 +145,7 @@ public class PipelineFactoryTests extends ESTestCase {
pipelineConfig.put(Pipeline.PROCESSORS_KEY,
Collections.singletonList(Collections.singletonMap("test", processorConfig)));
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry);
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService);
assertThat(pipeline.getId(), equalTo("_id"));
assertThat(pipeline.getDescription(), equalTo("_description"));
assertThat(pipeline.getVersion(), equalTo(version));
@ -156,7 +165,10 @@ public class PipelineFactoryTests extends ESTestCase {
pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
Map<String, Processor.Factory> processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory());
Exception e = expectThrows(ElasticsearchParseException.class, () -> Pipeline.create("_id", pipelineConfig, processorRegistry));
Exception e = expectThrows(
ElasticsearchParseException.class,
() -> Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService)
);
assertThat(e.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]"));
}
@ -169,7 +181,7 @@ public class PipelineFactoryTests extends ESTestCase {
pipelineConfig.put(Pipeline.VERSION_KEY, versionString);
pipelineConfig.put(Pipeline.PROCESSORS_KEY, Collections.singletonList(Collections.singletonMap("test", processorConfig)));
Map<String, Processor.Factory> processorRegistry = Collections.singletonMap("test", new TestProcessor.Factory());
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry);
Pipeline pipeline = Pipeline.create("_id", pipelineConfig, processorRegistry, scriptService);
assertThat(pipeline.getId(), equalTo("_id"));
assertThat(pipeline.getDescription(), equalTo("_description"));
assertThat(pipeline.getVersion(), equalTo(version));

Some files were not shown because too many files have changed in this diff.