Merge branch 'master' into feature/rank-eval
Commit e1fe0dc462
@@ -23,6 +23,7 @@ import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.elasticsearch.gradle.plugin.PluginPropertiesExtension
import org.gradle.api.AntBuilder
import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
@@ -30,6 +31,7 @@ import org.gradle.api.InvalidUserDataException
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.artifacts.Configuration
import org.gradle.api.artifacts.Dependency
import org.gradle.api.file.FileCollection
import org.gradle.api.logging.Logger
import org.gradle.api.tasks.Copy
@@ -82,12 +84,17 @@ class ClusterFormationTasks {
// from mirrors using gradles built-in mechanism etc.
project.configurations {
elasticsearchBwcDistro
elasticsearchBwcPlugins
}
configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion)
for (Map.Entry<String, Project> entry : config.plugins.entrySet()) {
configureBwcPluginDependency("${task.name}_elasticsearchBwcPlugins", project, entry.getValue(),
project.configurations.elasticsearchBwcPlugins, config.bwcVersion)
}
}
for (int i = 0; i < config.numNodes; i++) {
// we start N nodes and out of these N nodes there might be M bwc nodes.
// for each of those nodes we might have a different configuratioon
// for each of those nodes we might have a different configuration
String elasticsearchVersion = VersionProperties.elasticsearch
Configuration distro = currentDistro
if (i < config.numBwcNodes) {
@@ -116,6 +123,13 @@ class ClusterFormationTasks {
project.dependencies.add(configuration.name, "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}")
}

/** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */
static void configureBwcPluginDependency(String name, Project project, Project pluginProject, Configuration configuration, String elasticsearchVersion) {
verifyProjectHasBuildPlugin(name, elasticsearchVersion, project, pluginProject)
PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin');
project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${extension.name}:${elasticsearchVersion}@zip")
}

/**
* Adds dependent tasks to start an elasticsearch cluster before the given task is executed,
* and stop it after it has finished executing.
@@ -147,7 +161,13 @@ class ClusterFormationTasks {
setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration)
setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node, seedNode)
setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
if (node.config.plugins.isEmpty() == false) {
if (node.nodeVersion == VersionProperties.elasticsearch) {
setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
} else {
setup = configureCopyBwcPluginsTask(taskName(task, node, 'copyBwcPlugins'), project, setup, node)
}
}

// install modules
for (Project module : node.config.modules) {
@@ -317,18 +337,13 @@ class ClusterFormationTasks {
* to the test resources for this project.
*/
static Task configureCopyPluginsTask(String name, Project project, Task setup, NodeInfo node) {
if (node.config.plugins.isEmpty()) {
return setup
}
Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)

List<FileCollection> pluginFiles = []
for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {

Project pluginProject = plugin.getValue()
if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task ${name} cannot project ${pluginProject.path} which is not an esplugin")
}
verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
String configurationName = "_plugin_${pluginProject.path}"
Configuration configuration = project.configurations.findByName(configurationName)
if (configuration == null) {
@@ -358,6 +373,31 @@ class ClusterFormationTasks {
return copyPlugins
}

/** Configures task to copy a plugin based on a zip file resolved using dependencies for an older version */
static Task configureCopyBwcPluginsTask(String name, Project project, Task setup, NodeInfo node) {
for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
Project pluginProject = plugin.getValue()
verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
String configurationName = "_plugin_bwc_${pluginProject.path}"
Configuration configuration = project.configurations.findByName(configurationName)
if (configuration == null) {
configuration = project.configurations.create(configurationName)
}

final String depName = pluginProject.extensions.findByName('esplugin').name
Dependency dep = project.configurations.elasticsearchBwcPlugins.dependencies.find {
it.name == depName
}
configuration.dependencies.add(dep)
}

Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup) {
from project.configurations.elasticsearchBwcPlugins
into node.pluginsTmpDir
}
return copyPlugins
}

static Task configureInstallModuleTask(String name, Project project, Task setup, NodeInfo node, Project module) {
if (node.config.distribution != 'integ-test-zip') {
throw new GradleException("Module ${module.path} not allowed be installed distributions other than integ-test-zip because they should already have all modules bundled!")
@@ -373,7 +413,12 @@ class ClusterFormationTasks {
}

static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Project plugin) {
FileCollection pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
final FileCollection pluginZip;
if (node.nodeVersion != VersionProperties.elasticsearch) {
pluginZip = project.configurations.getByName("_plugin_bwc_${plugin.path}")
} else {
pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
}
// delay reading the file location until execution time by wrapping in a closure within a GString
Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
Object[] args = [new File(node.homeDir, 'bin/elasticsearch-plugin'), 'install', file]
@@ -623,4 +668,11 @@ class ClusterFormationTasks {
project.ant.project.removeBuildListener(listener)
return retVal
}

static void verifyProjectHasBuildPlugin(String name, String version, Project project, Project pluginProject) {
if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task [${name}] cannot add plugin [${pluginProject.path}] with version [${version}] to project's " +
"[${project.path}] dependencies: the plugin is not an esplugin")
}
}
}

@@ -93,6 +93,9 @@ class NodeInfo {
/** buffer for ant output when starting this node */
ByteArrayOutputStream buffer = new ByteArrayOutputStream()

/** the version of elasticsearch that this node runs */
String nodeVersion

/** Creates a node to run as part of a cluster for the given task */
NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) {
this.config = config
@@ -105,6 +108,7 @@ class NodeInfo {
}
baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
pidFile = new File(baseDir, 'es.pid')
this.nodeVersion = nodeVersion
homeDir = homeDir(baseDir, config.distribution, nodeVersion)
confDir = confDir(baseDir, config.distribution, nodeVersion)
if (config.dataDir != null) {

@@ -40,7 +40,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]settings[/\\]SettingsUpdater.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]shards[/\\]TransportClusterSearchShardsAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]create[/\\]CreateSnapshotRequestBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]create[/\\]TransportCreateSnapshotAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]snapshots[/\\]delete[/\\]DeleteSnapshotRequestBuilder.java" checks="LineLength" />
@@ -223,8 +222,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]FilterClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]support[/\\]AbstractClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]support[/\\]TransportProxyClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterState.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterStateObserver.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterStateUpdateTask.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]DiffableUtils.java" checks="LineLength" />

@@ -55,13 +55,36 @@ public class PreBuiltTransportClient extends TransportClient {
PercolatorPlugin.class,
MustachePlugin.class));


/**
* Creates a new transport client with pre-installed plugins.
* @param settings the settings passed to this transport client
* @param plugins an optional array of additional plugins to run with this client
*/
@SafeVarargs
public PreBuiltTransportClient(Settings settings, Class<? extends Plugin>... plugins) {
this(settings, Arrays.asList(plugins));
}


/**
* Creates a new transport client with pre-installed plugins.
* @param settings the settings passed to this transport client
* @param plugins a collection of additional plugins to run with this client
*/
public PreBuiltTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS));
this(settings, plugins, null);
}

/**
* Creates a new transport client with pre-installed plugins.
* @param settings the settings passed to this transport client
* @param plugins a collection of additional plugins to run with this client
* @param hostFailureListener a failure listener that is invoked if a node is disconnected. This can be <code>null</code>
*/
public PreBuiltTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins,
HostFailureListener hostFailureListener) {
super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS), hostFailureListener);
}

@Override

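For orientation, the constructors above remain the usual entry point; the two shorter ones now delegate to the new three-argument form with a null HostFailureListener. A minimal usage sketch follows; it is not part of this commit, and the cluster name, address, and package names are assumptions based on the 5.x transport-client API.

import java.net.InetAddress;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class TransportClientExample {
    public static void main(String[] args) throws Exception {
        // "my-cluster" and localhost:9300 are placeholders for a real cluster
        Settings settings = Settings.builder()
                .put("cluster.name", "my-cluster")
                .build();
        // varargs constructor -> collection constructor -> (settings, plugins, null) per this diff
        try (PreBuiltTransportClient client = new PreBuiltTransportClient(settings)) {
            client.addTransportAddress(
                    new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
            // use client.prepareSearch(...), client.prepareIndex(...), etc.
        }
    }
}
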
@@ -49,9 +49,19 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VAL
*/
public class ElasticsearchException extends RuntimeException implements ToXContent, Writeable {

public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);
public static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0);
static final Version UNKNOWN_VERSION_ADDED = Version.fromId(0);

/**
* Passed in the {@link Params} of {@link #toXContent(XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params, Throwable)}
* to control if the {@code caused_by} element should render. Unlike most parameters to {@code toXContent} methods this parameter is
* internal only and not available as a URL parameter.
*/
public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip";
/**
* Passed in the {@link Params} of {@link #toXContent(XContentBuilder, org.elasticsearch.common.xcontent.ToXContent.Params, Throwable)}
* to control if the {@code stack_trace} element should render. Unlike most parameters to {@code toXContent} methods this parameter is
* internal only and not available as a URL parameter. Use the {@code error_trace} parameter instead.
*/
public static final String REST_EXCEPTION_SKIP_STACK_TRACE = "rest.exception.stacktrace.skip";
public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;
public static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
@@ -307,7 +317,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}

/**
* Statis toXContent helper method that also renders non {@link org.elasticsearch.ElasticsearchException} instances as XContent.
* Static toXContent helper method that also renders non {@link org.elasticsearch.ElasticsearchException} instances as XContent.
*/
public static void toXContent(XContentBuilder builder, Params params, Throwable ex) throws IOException {
ex = ExceptionsHelper.unwrapCause(ex);
@@ -709,9 +719,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
STATUS_EXCEPTION(org.elasticsearch.ElasticsearchStatusException.class, org.elasticsearch.ElasticsearchStatusException::new, 145,
UNKNOWN_VERSION_ADDED),
TASK_CANCELLED_EXCEPTION(org.elasticsearch.tasks.TaskCancelledException.class,
org.elasticsearch.tasks.TaskCancelledException::new, 146, UNKNOWN_VERSION_ADDED),
org.elasticsearch.tasks.TaskCancelledException::new, 146, Version.V_5_1_0_UNRELEASED),
SHARD_LOCK_OBTAIN_FAILED_EXCEPTION(org.elasticsearch.env.ShardLockObtainFailedException.class,
org.elasticsearch.env.ShardLockObtainFailedException::new, 147, V_5_1_0_UNRELEASED);
org.elasticsearch.env.ShardLockObtainFailedException::new, 147, Version.V_5_0_2_UNRELEASED);


final Class<? extends ElasticsearchException> exceptionClass;
@@ -853,4 +863,5 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}
return sb.toString();
}

}

@@ -75,6 +75,8 @@ public class Version {
public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
public static final int V_2_4_1_ID = 2040199;
public static final Version V_2_4_1 = new Version(V_2_4_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
public static final int V_2_4_2_ID = 2040299;
public static final Version V_2_4_2 = new Version(V_2_4_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
public static final int V_5_0_0_alpha1_ID = 5000001;
public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final int V_5_0_0_alpha2_ID = 5000002;
@@ -93,18 +95,18 @@ public class Version {
public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
public static final int V_5_0_1_ID = 5000199;
public static final Version V_5_0_1 = new Version(V_5_0_1_ID, org.apache.lucene.util.Version.LUCENE_6_2_1);
public static final int V_6_0_0_alpha1_ID = 6000001;
public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final Version CURRENT = V_6_0_0_alpha1;
public static final int V_5_0_2_ID_UNRELEASED = 5000299;
public static final Version V_5_0_2_UNRELEASED = new Version(V_5_0_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_2_1);
public static final int V_5_1_0_ID_UNRELEASED = 5010099;
public static final Version V_5_1_0_UNRELEASED = new Version(V_5_1_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final int V_5_2_0_ID_UNRELEASED = 5020099;
public static final Version V_5_2_0_UNRELEASED = new Version(V_5_2_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
public static final Version V_6_0_0_alpha1_UNRELEASED =
new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_3_0);
public static final Version CURRENT = V_6_0_0_alpha1_UNRELEASED;

/* NOTE: don't add unreleased version to this list except of the version assigned to CURRENT.
* If you need a version that doesn't exist here for instance V_5_1_0 then go and create such a version
* as a constant where you need it:
* <pre>
* public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);
* </pre>
* Then go to VersionsTest.java and add a test for this constant VersionTests#testUnknownVersions().
* This is particularly useful if you are building a feature that needs a BWC layer for this unreleased version etc.*/
// unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)

static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@@ -117,8 +119,14 @@ public class Version {

public static Version fromId(int id) {
switch (id) {
case V_6_0_0_alpha1_ID:
return V_6_0_0_alpha1;
case V_6_0_0_alpha1_ID_UNRELEASED:
return V_6_0_0_alpha1_UNRELEASED;
case V_5_2_0_ID_UNRELEASED:
return V_5_2_0_UNRELEASED;
case V_5_1_0_ID_UNRELEASED:
return V_5_1_0_UNRELEASED;
case V_5_0_2_ID_UNRELEASED:
return V_5_0_2_UNRELEASED;
case V_5_0_1_ID:
return V_5_0_1;
case V_5_0_0_ID:
@@ -137,6 +145,8 @@ public class Version {
return V_5_0_0_alpha2;
case V_5_0_0_alpha1_ID:
return V_5_0_0_alpha1;
case V_2_4_2_ID:
return V_2_4_2;
case V_2_4_1_ID:
return V_2_4_1;
case V_2_4_0_ID:
@@ -304,8 +314,8 @@ public class Version {
public Version minimumCompatibilityVersion() {
final int bwcMajor;
final int bwcMinor;
if (this.onOrAfter(Version.V_6_0_0_alpha1)) {
bwcMajor = major-1;
if (this.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
bwcMajor = major - 1;
bwcMinor = 0; // TODO we have to move this to the latest released minor of the last major but for now we just keep
} else {
bwcMajor = major;

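The NOTE in the hunk above describes the convention this diff relies on: unreleased versions are not added to the main list, but declared where they are needed via Version.fromId and compared with onOrAfter. A minimal sketch of that convention, not part of this commit (the enclosing class name is made up; only Version.fromId and onOrAfter are real API):

import org.elasticsearch.Version;

public class MyFeature {
    // per the NOTE above: declare the unreleased version locally instead of extending Version's list
    public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);

    public static boolean supportsNewWireFormat(Version remoteVersion) {
        // version IDs are plain integers, so onOrAfter() is a simple numeric comparison
        return remoteVersion.onOrAfter(V_5_1_0_UNRELEASED);
    }
}
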
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.cluster.shards;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
@@ -29,14 +30,15 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.Objects;

public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSearchShardsRequest> implements IndicesRequest.Replaceable {
private String[] indices;

private String[] indices = Strings.EMPTY_ARRAY;
@Nullable
private String routing;
@Nullable
private String preference;
private String[] types = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.lenientExpandOpen();


@@ -57,14 +59,9 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSea
*/
@Override
public ClusterSearchShardsRequest indices(String... indices) {
if (indices == null) {
throw new IllegalArgumentException("indices must not be null");
} else {
for (int i = 0; i < indices.length; i++) {
if (indices[i] == null) {
throw new IllegalArgumentException("indices[" + i + "] must not be null");
}
}
Objects.requireNonNull(indices, "indices must not be null");
for (int i = 0; i < indices.length; i++) {
Objects.requireNonNull(indices[i], "indices[" + i + "] must not be null");
}
this.indices = indices;
return this;
@@ -88,23 +85,6 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSea
return this;
}

/**
* The document types to execute the search against. Defaults to be executed against
* all types.
*/
public String[] types() {
return types;
}

/**
* The document types to execute the search against. Defaults to be executed against
* all types.
*/
public ClusterSearchShardsRequest types(String... types) {
this.types = types;
return this;
}

/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
@@ -154,7 +134,10 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSea
routing = in.readOptionalString();
preference = in.readOptionalString();

types = in.readStringArray();
if (in.getVersion().onOrBefore(Version.V_5_1_0_UNRELEASED)) {
//types
in.readStringArray();
}
indicesOptions = IndicesOptions.readIndicesOptions(in);
}

@@ -170,7 +153,10 @@ public class ClusterSearchShardsRequest extends MasterNodeReadRequest<ClusterSea
out.writeOptionalString(routing);
out.writeOptionalString(preference);

out.writeStringArray(types);
if (out.getVersion().onOrBefore(Version.V_5_1_0_UNRELEASED)) {
//types
out.writeStringArray(Strings.EMPTY_ARRAY);
}
indicesOptions.writeIndicesOptions(out);
}

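The readFrom/writeTo changes above follow the usual wire-compatibility pattern: a field is read or written only when the stream's version says the other node understands it. A stripped-down sketch of the pattern, not part of this commit (the class and field names are hypothetical; the stream and Version calls are real API):

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

// Hypothetical request: newField is only exchanged with nodes on or after the version that added it.
public class ExampleRequest implements Streamable {

    private String newField; // assumed to have been added in V_5_1_0_UNRELEASED

    @Override
    public void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
            newField = in.readOptionalString();
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
            out.writeOptionalString(newField);
        }
    }
}
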
@@ -37,15 +37,6 @@ public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRe
return this;
}

/**
* The document types to execute the search against. Defaults to be executed against
* all types.
*/
public ClusterSearchShardsRequestBuilder setTypes(String... types) {
request.types(types);
return this;
}

/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/

@@ -19,24 +19,36 @@

package org.elasticsearch.action.admin.cluster.shards;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.internal.AliasFilter;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class ClusterSearchShardsResponse extends ActionResponse implements ToXContent {

private ClusterSearchShardsGroup[] groups;
private DiscoveryNode[] nodes;
private Map<String, AliasFilter> indicesAndFilters;

ClusterSearchShardsResponse() {

}

ClusterSearchShardsResponse(ClusterSearchShardsGroup[] groups, DiscoveryNode[] nodes,
Map<String, AliasFilter> indicesAndFilters) {
this.groups = groups;
this.nodes = nodes;
this.indicesAndFilters = indicesAndFilters;
}

public ClusterSearchShardsGroup[] getGroups() {
return groups;
}
@@ -45,9 +57,8 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
return nodes;
}

public ClusterSearchShardsResponse(ClusterSearchShardsGroup[] groups, DiscoveryNode[] nodes) {
this.groups = groups;
this.nodes = nodes;
public Map<String, AliasFilter> getIndicesAndFilters() {
return indicesAndFilters;
}

@Override
@@ -61,7 +72,15 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
for (int i = 0; i < nodes.length; i++) {
nodes[i] = new DiscoveryNode(in);
}

if (in.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
int size = in.readVInt();
indicesAndFilters = new HashMap<>();
for (int i = 0; i < size; i++) {
String index = in.readString();
AliasFilter aliasFilter = new AliasFilter(in);
indicesAndFilters.put(index, aliasFilter);
}
}
}

@Override
@@ -75,6 +94,13 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
for (DiscoveryNode node : nodes) {
node.writeTo(out);
}
if (out.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
out.writeVInt(indicesAndFilters.size());
for (Map.Entry<String, AliasFilter> entry : indicesAndFilters.entrySet()) {
out.writeString(entry.getKey());
entry.getValue().writeTo(out);
}
}
}

@Override
@@ -84,6 +110,20 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
node.toXContent(builder, params);
}
builder.endObject();
if (indicesAndFilters != null) {
builder.startObject("indices");
for (Map.Entry<String, AliasFilter> entry : indicesAndFilters.entrySet()) {
String index = entry.getKey();
builder.startObject(index);
AliasFilter aliasFilter = entry.getValue();
if (aliasFilter.getQueryBuilder() != null) {
builder.field("filter");
aliasFilter.getQueryBuilder().toXContent(builder, params);
}
builder.endObject();
}
builder.endObject();
}
builder.startArray("shards");
for (ClusterSearchShardsGroup group : groups) {
group.toXContent(builder, params);
@@ -91,4 +131,5 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
builder.endArray();
return builder;
}

}

@@ -33,21 +33,29 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class TransportClusterSearchShardsAction extends TransportMasterNodeReadAction<ClusterSearchShardsRequest, ClusterSearchShardsResponse> {
public class TransportClusterSearchShardsAction extends
TransportMasterNodeReadAction<ClusterSearchShardsRequest, ClusterSearchShardsResponse> {

private final IndicesService indicesService;

@Inject
public TransportClusterSearchShardsAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ClusterSearchShardsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterSearchShardsRequest::new);
IndicesService indicesService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ClusterSearchShardsAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, ClusterSearchShardsRequest::new);
this.indicesService = indicesService;
}

@Override
@@ -58,7 +66,8 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA

@Override
protected ClusterBlockException checkBlock(ClusterSearchShardsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ,
indexNameExpressionResolver.concreteIndexNames(state, request));
}

@Override
@@ -67,12 +76,20 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
}

@Override
protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) {
protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state,
final ActionListener<ClusterSearchShardsResponse> listener) {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices());
Map<String, AliasFilter> indicesAndFilters = new HashMap<>();
for (String index : concreteIndices) {
AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, index, request.indices());
indicesAndFilters.put(index, aliasFilter);
}

Set<String> nodeIds = new HashSet<>();
GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices,
routingMap, request.preference());
ShardRouting shard;
ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()];
int currentGroup = 0;
@@ -92,6 +109,6 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
for (String nodeId : nodeIds) {
nodes[currentNode++] = clusterState.getNodes().get(nodeId);
}
listener.onResponse(new ClusterSearchShardsResponse(groupResponses, nodes));
listener.onResponse(new ClusterSearchShardsResponse(groupResponses, nodes, indicesAndFilters));
}
}

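With the change above, the search-shards response now carries the alias filter computed for each concrete index. A possible way a caller could consume that, sketched under the assumption that the standard cluster-admin client API is used (this snippet is illustrative only and is not part of this commit):

import java.util.Map;

import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.internal.AliasFilter;

public class SearchShardsExample {
    // Prints which concrete indices a search would hit and any alias filter attached to them.
    static void printShardsAndFilters(Client client, String index) {
        ClusterSearchShardsResponse response =
                client.admin().cluster().prepareSearchShards(index).get();
        Map<String, AliasFilter> indicesAndFilters = response.getIndicesAndFilters();
        if (indicesAndFilters == null) {
            return; // responses from nodes older than V_5_1_0_UNRELEASED do not carry filters
        }
        for (Map.Entry<String, AliasFilter> entry : indicesAndFilters.entrySet()) {
            AliasFilter filter = entry.getValue();
            System.out.println(entry.getKey() + " -> "
                    + (filter.getQueryBuilder() == null ? "<no filter>" : filter.getQueryBuilder()));
        }
    }
}
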
@@ -471,7 +471,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
cause = in.readString();
name = in.readString();

if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
indexPatterns = in.readList(StreamInput::readString);
} else {
indexPatterns = Collections.singletonList(in.readString());
@@ -501,7 +501,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
super.writeTo(out);
out.writeString(cause);
out.writeString(name);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
out.writeStringList(indexPatterns);
} else {
out.writeString(indexPatterns.size() > 0 ? indexPatterns.get(0) : "");

@@ -302,7 +302,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
@Override
public void readFrom(StreamInput in) throws IOException {
id = in.readVInt();
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
opType = OpType.fromId(in.readByte());
} else {
opType = OpType.fromString(in.readString());
@@ -328,7 +328,7 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(id);
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
out.writeByte(opType.getId());
} else {
out.writeString(opType.getLowercase());

@@ -126,12 +126,12 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
} else {
AliasFilter filter = this.aliasFilter.get(shard.index().getName());
ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shard, shardsIts.size(),
ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shardIt.shardId(), shardsIts.size(),
filter, startTime());
sendExecuteFirstPhase(node, transportRequest , new ActionListener<FirstResult>() {
@Override
public void onResponse(FirstResult result) {
onFirstPhaseResult(shardIndex, shard, result, shardIt);
onFirstPhaseResult(shardIndex, shard.currentNodeId(), result, shardIt);
}

@Override
@@ -143,8 +143,8 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
}
}

void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
private void onFirstPhaseResult(int shardIndex, String nodeId, FirstResult result, ShardIterator shardIt) {
result.shardTarget(new SearchShardTarget(nodeId, shardIt.shardId()));
processFirstPhaseResult(shardIndex, result);
// we need to increment successful ops first before we compare the exit condition otherwise if we
// are fast we could concurrently update totalOps but then preempt one of the threads which can
@@ -173,11 +173,11 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
}
}

void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId,
private void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId,
final ShardIterator shardIt, Exception e) {
// we always add the shard failure for a specific shard instance
// we do make sure to clean it on a successful response from a shard
SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId().getIndex(), shardIt.shardId().getId());
SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId());
addShardFailure(shardIndex, shardTarget, e);

if (totalOps.incrementAndGet() == expectedTotalOps) {

@@ -83,7 +83,7 @@ public class SearchPhaseController extends AbstractComponent {
private final BigArrays bigArrays;
private final ScriptService scriptService;

SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService) {
public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService) {
super(settings);
this.bigArrays = bigArrays;
this.scriptService = scriptService;

@@ -29,6 +29,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.tasks.Task;
@@ -36,6 +37,7 @@ import org.elasticsearch.tasks.TaskId;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Objects;

/**
@@ -45,12 +47,15 @@ import java.util.Objects;
* Note, the search {@link #source(org.elasticsearch.search.builder.SearchSourceBuilder)}
* is required. The search source is the different search options, including aggregations and such.
* </p>
*
* @see org.elasticsearch.client.Requests#searchRequest(String...)
* @see org.elasticsearch.client.Client#search(SearchRequest)
* @see SearchResponse
*/
public final class SearchRequest extends ActionRequest implements IndicesRequest.Replaceable {

private static final ToXContent.Params FORMAT_PARAMS = new ToXContent.MapParams(Collections.singletonMap("pretty", "false"));

private SearchType searchType = SearchType.DEFAULT;

private String[] indices = Strings.EMPTY_ARRAY;
@@ -279,7 +284,26 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest

@Override
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new SearchTask(id, type, action, getDescription(), parentTaskId);
// generating description in a lazy way since source can be quite big
return new SearchTask(id, type, action, null, parentTaskId) {
@Override
public String getDescription() {
StringBuilder sb = new StringBuilder();
sb.append("indices[");
Strings.arrayToDelimitedString(indices, ",", sb);
sb.append("], ");
sb.append("types[");
Strings.arrayToDelimitedString(types, ",", sb);
sb.append("], ");
sb.append("search_type[").append(searchType).append("], ");
if (source != null) {
sb.append("source[").append(source.toString(FORMAT_PARAMS)).append("]");
} else {
sb.append("source[]");
}
return sb.toString();
}
};
}

@Override

@@ -139,4 +139,10 @@ public class SearchScrollRequest extends ActionRequest {
", scroll=" + scroll +
'}';
}

@Override
public String getDescription() {
return "scrollId[" + scrollId + "], scroll[" + scroll + "]";
}

}

@@ -23,8 +23,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchTask;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
@@ -75,6 +73,7 @@ public class SearchTransportService extends AbstractComponent {

private final TransportService transportService;

SearchTransportService(Settings settings, TransportService transportService) {
public SearchTransportService(Settings settings, TransportService transportService) {
super(settings);
this.transportService = transportService;
}

@@ -51,10 +51,11 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
@Inject
public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
IndexNameExpressionResolver indexNameExpressionResolver,
SearchTransportService searchTransportService) {
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = new SearchTransportService(settings, transportService);
this.searchTransportService = searchTransportService;
}

@Override

@@ -32,8 +32,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.tasks.Task;
@@ -62,13 +60,13 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
private final SearchService searchService;

@Inject
public TransportSearchAction(Settings settings, ThreadPool threadPool, BigArrays bigArrays, ScriptService scriptService,
TransportService transportService, SearchService searchService,
ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
indexNameExpressionResolver) {
public TransportSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, SearchService searchService,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService);
this.searchTransportService = new SearchTransportService(settings, transportService);
this.searchPhaseController = searchPhaseController;
this.searchTransportService = searchTransportService;
SearchTransportService.registerRequestHandler(transportService, searchService);
this.clusterService = clusterService;
this.searchService = searchService;

@@ -26,8 +26,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -43,14 +41,15 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
private final SearchPhaseController searchPhaseController;

@Inject
public TransportSearchScrollAction(Settings settings, BigArrays bigArrays, ThreadPool threadPool, ScriptService scriptService,
TransportService transportService, ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController) {
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
SearchScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = new SearchTransportService(settings, transportService);
this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService);
this.searchTransportService = searchTransportService;
this.searchPhaseController = searchPhaseController;
}

@Override

@@ -69,10 +69,16 @@ public abstract class ToXContentToBytes implements ToXContent {

@Override
public final String toString() {
return toString(EMPTY_PARAMS);
}

public final String toString(Params params) {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
toXContent(builder, EMPTY_PARAMS);
if (params.paramAsBoolean("pretty", true)) {
builder.prettyPrint();
}
toXContent(builder, params);
return builder.string();
} catch (Exception e) {
// So we have a stack trace logged somewhere

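The new toString(Params) overload above lets callers opt out of pretty-printing by passing pretty=false, which is exactly what SearchRequest's FORMAT_PARAMS does. A hedged usage sketch, assuming SearchSourceBuilder still extends ToXContentToBytes at this point in the codebase (the snippet is illustrative only and not part of this commit):

import java.util.Collections;

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class ToStringParamsExample {
    public static void main(String[] args) {
        SearchSourceBuilder source = new SearchSourceBuilder().query(QueryBuilders.matchAllQuery());
        // compact, single-line JSON instead of the default pretty-printed form
        String compact = source.toString(
                new ToXContent.MapParams(Collections.singletonMap("pretty", "false")));
        System.out.println(compact);
    }
}
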
@@ -21,10 +21,10 @@ package org.elasticsearch.bootstrap;

import org.elasticsearch.common.SuppressForbidden;

import java.net.SocketPermission;
import java.net.URL;
import java.io.FilePermission;
import java.io.IOException;
import java.net.SocketPermission;
import java.net.URL;
import java.security.CodeSource;
import java.security.Permission;
import java.security.PermissionCollection;
@@ -32,6 +32,7 @@ import java.security.Permissions;
import java.security.Policy;
import java.security.ProtectionDomain;
import java.util.Map;
import java.util.function.Predicate;

/** custom policy for union of static and dynamic permissions */
final class ESPolicy extends Policy {
@@ -133,18 +134,66 @@ final class ESPolicy extends Policy {

// TODO: remove this hack when insecure defaults are removed from java

/**
* Wraps a bad default permission, applying a pre-implies to any permissions before checking if the wrapped bad default permission
* implies a permission.
*/
private static class BadDefaultPermission extends Permission {

private final Permission badDefaultPermission;
private final Predicate<Permission> preImplies;

/**
* Construct an instance with a pre-implies check to apply to desired permissions.
*
* @param badDefaultPermission the bad default permission to wrap
* @param preImplies a test that is applied to a desired permission before checking if the bad default permission that
* this instance wraps implies the desired permission
*/
public BadDefaultPermission(final Permission badDefaultPermission, final Predicate<Permission> preImplies) {
super(badDefaultPermission.getName());
this.badDefaultPermission = badDefaultPermission;
this.preImplies = preImplies;
}

@Override
public final boolean implies(Permission permission) {
return preImplies.test(permission) && badDefaultPermission.implies(permission);
}

@Override
public final boolean equals(Object obj) {
return badDefaultPermission.equals(obj);
}

@Override
public int hashCode() {
return badDefaultPermission.hashCode();
}

@Override
public String getActions() {
return badDefaultPermission.getActions();
}

}

// default policy file states:
// "It is strongly recommended that you either remove this permission
// from this policy file or further restrict it to code sources
// that you specify, because Thread.stop() is potentially unsafe."
// not even sure this method still works...
static final Permission BAD_DEFAULT_NUMBER_ONE = new RuntimePermission("stopThread");
private static final Permission BAD_DEFAULT_NUMBER_ONE = new BadDefaultPermission(new RuntimePermission("stopThread"), p -> true);

// default policy file states:
// "allows anyone to listen on dynamic ports"
// specified exactly because that is what we want, and fastest since it won't imply any
// expensive checks for the implicit "resolve"
static final Permission BAD_DEFAULT_NUMBER_TWO = new SocketPermission("localhost:0", "listen");
private static final Permission BAD_DEFAULT_NUMBER_TWO =
new BadDefaultPermission(
new SocketPermission("localhost:0", "listen"),
// we apply this pre-implies test because some SocketPermission#implies calls do expensive reverse-DNS resolves
p -> p instanceof SocketPermission && p.getActions().contains("listen"));

/**
* Wraps the Java system policy, filtering out bad default permissions that
@@ -159,7 +208,7 @@ final class ESPolicy extends Policy {

@Override
public boolean implies(ProtectionDomain domain, Permission permission) {
if (BAD_DEFAULT_NUMBER_ONE.equals(permission) || BAD_DEFAULT_NUMBER_TWO.equals(permission)) {
if (BAD_DEFAULT_NUMBER_ONE.implies(permission) || BAD_DEFAULT_NUMBER_TWO.implies(permission)) {
return false;
}
return delegate.implies(domain, permission);

@@ -272,35 +272,10 @@ final class Security {
* @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.
* @param settings the {@link Settings} instance to read the HTTP and transport settings from
*/
static void addBindPermissions(Permissions policy, Settings settings) {
private static void addBindPermissions(Permissions policy, Settings settings) {
addSocketPermissionForHttp(policy, settings);
// transport is waaaay overengineered
Map<String, Settings> profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups();
if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) {
profiles = new HashMap<>(profiles);
profiles.put(TransportSettings.DEFAULT_PROFILE, Settings.EMPTY);
}

// loop through all profiles and add permissions for each one, if its valid.
// (otherwise Netty transports are lenient and ignores it)
for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
Settings profileSettings = entry.getValue();
String name = entry.getKey();

// a profile is only valid if its the default profile, or if it has an actual name and specifies a port
boolean valid = TransportSettings.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get("port") != null);
if (valid) {
addSocketPermissionForTransportProfile(policy, profileSettings, settings);
}
}

for (final Settings tribeNodeSettings : settings.getGroups("tribe", true).values()) {
// tribe nodes have HTTP disabled by default, so we check if HTTP is enabled before granting
if (NetworkModule.HTTP_ENABLED.exists(tribeNodeSettings) && NetworkModule.HTTP_ENABLED.get(tribeNodeSettings)) {
addSocketPermissionForHttp(policy, tribeNodeSettings);
}
addSocketPermissionForTransport(policy, tribeNodeSettings);
}
addSocketPermissionForTransportProfiles(policy, settings);
addSocketPermissionForTribeNodes(policy, settings);
}

/**
@@ -320,18 +295,33 @@ final class Security {
* the transport profile specified by {@code profileSettings} and will fall back to {@code settings}.
*
* @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to
* @param profileSettings the {@link Settings} to read the transport profile from
* @param settings the {@link Settings} instance to read the transport settings from
*/
private static void addSocketPermissionForTransportProfile(
private static void addSocketPermissionForTransportProfiles(
final Permissions policy,
final Settings profileSettings,
final Settings settings) {
final String transportRange = profileSettings.get("port");
if (transportRange != null) {
addSocketPermissionForPortRange(policy, transportRange);
} else {
addSocketPermissionForTransport(policy, settings);
// transport is way over-engineered
final Map<String, Settings> profiles = new HashMap<>(TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups());
profiles.putIfAbsent(TransportSettings.DEFAULT_PROFILE, Settings.EMPTY);

// loop through all profiles and add permissions for each one, if it's valid; otherwise Netty transports are lenient and ignores it
for (final Map.Entry<String, Settings> entry : profiles.entrySet()) {
final Settings profileSettings = entry.getValue();
final String name = entry.getKey();

// a profile is only valid if it's the default profile, or if it has an actual name and specifies a port
// TODO: can this leniency be removed?
final boolean valid =
TransportSettings.DEFAULT_PROFILE.equals(name) ||
(name != null && name.length() > 0 && profileSettings.get("port") != null);
if (valid) {
final String transportRange = profileSettings.get("port");
if (transportRange != null) {
addSocketPermissionForPortRange(policy, transportRange);
} else {
addSocketPermissionForTransport(policy, settings);
}
}
}
}

@@ -346,6 +336,16 @@ final class Security {
addSocketPermissionForPortRange(policy, transportRange);
}

private static void addSocketPermissionForTribeNodes(final Permissions policy, final Settings settings) {
for (final Settings tribeNodeSettings : settings.getGroups("tribe", true).values()) {
// tribe nodes have HTTP disabled by default, so we check if HTTP is enabled before granting
if (NetworkModule.HTTP_ENABLED.exists(tribeNodeSettings) && NetworkModule.HTTP_ENABLED.get(tribeNodeSettings)) {
addSocketPermissionForHttp(policy, tribeNodeSettings);
}
addSocketPermissionForTransport(policy, tribeNodeSettings);
}
}

/**
* Add dynamic {@link SocketPermission} for the specified port range.
*

@ -27,7 +27,6 @@ import org.elasticsearch.action.ActionRequest;
|
||||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.client.support.AbstractClient;
|
||||
import org.elasticsearch.client.transport.support.TransportProxyClient;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Injector;
|
||||
@ -40,6 +39,7 @@ import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsModule;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.node.Node;
|
||||
@ -65,6 +65,8 @@ import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
|
||||
|
||||
/**
|
||||
* The transport client allows one to create a client that is not part of the cluster, but simply connects to one
|
||||
* or more nodes directly by adding their respective addresses using {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}.
|
||||
@ -74,6 +76,15 @@ import java.util.stream.Collectors;
|
||||
*/
|
||||
public abstract class TransportClient extends AbstractClient {
|
||||
|
||||
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL =
|
||||
Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), Setting.Property.NodeScope);
|
||||
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT =
|
||||
Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), Setting.Property.NodeScope);
|
||||
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME =
|
||||
Setting.boolSetting("client.transport.ignore_cluster_name", false, Setting.Property.NodeScope);
|
||||
public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF =
|
||||
Setting.boolSetting("client.transport.sniff", false, Setting.Property.NodeScope);
|
||||
|
||||
private static PluginsService newPluginService(final Settings settings, Collection<Class<? extends Plugin>> plugins) {
|
||||
final Settings.Builder settingsBuilder = Settings.builder()
|
||||
.put(TcpTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval
|
||||
@ -101,7 +112,7 @@ public abstract class TransportClient extends AbstractClient {
|
||||
}
|
||||
|
||||
private static ClientTemplate buildTemplate(Settings providedSettings, Settings defaultSettings,
|
||||
Collection<Class<? extends Plugin>> plugins) {
|
||||
Collection<Class<? extends Plugin>> plugins, HostFailureListener failureListner) {
|
||||
if (Node.NODE_NAME_SETTING.exists(providedSettings) == false) {
|
||||
providedSettings = Settings.builder().put(providedSettings).put(Node.NODE_NAME_SETTING.getKey(), "_client_").build();
|
||||
}
|
||||
@ -140,7 +151,6 @@ public abstract class TransportClient extends AbstractClient {
|
||||
pluginsService.filterPlugins(ActionPlugin.class));
|
||||
modules.add(actionModule);
|
||||
|
||||
pluginsService.processModules(modules);
|
||||
CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
|
||||
settingsModule.getClusterSettings());
|
||||
resourcesToClose.add(circuitBreakerService);
|
||||
@ -164,7 +174,8 @@ public abstract class TransportClient extends AbstractClient {
|
||||
|
||||
Injector injector = modules.createInjector();
|
||||
final TransportClientNodesService nodesService =
|
||||
new TransportClientNodesService(settings, transportService, threadPool);
|
||||
new TransportClientNodesService(settings, transportService, threadPool, failureListner == null
|
||||
? (t, e) -> {} : failureListner);
|
||||
final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService,
|
||||
actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList()));
|
||||
|
||||
@ -222,7 +233,7 @@ public abstract class TransportClient extends AbstractClient {
|
||||
* Creates a new TransportClient with the given settings and plugins
|
||||
*/
|
||||
public TransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
|
||||
this(buildTemplate(settings, Settings.EMPTY, plugins));
|
||||
this(buildTemplate(settings, Settings.EMPTY, plugins, null));
|
||||
}
|
||||
|
||||
/**
@ -231,8 +242,9 @@ public abstract class TransportClient extends AbstractClient {
* @param defaultSettings default settings that are merged after the plugins have added their additional settings.
* @param plugins the client plugins
*/
protected TransportClient(Settings settings, Settings defaultSettings, Collection<Class<? extends Plugin>> plugins) {
this(buildTemplate(settings, defaultSettings, plugins));
protected TransportClient(Settings settings, Settings defaultSettings, Collection<Class<? extends Plugin>> plugins,
HostFailureListener hostFailureListener) {
this(buildTemplate(settings, defaultSettings, plugins, hostFailureListener));
}

private TransportClient(ClientTemplate template) {
@ -332,4 +344,22 @@ public abstract class TransportClient extends AbstractClient {
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
proxy.execute(action, request, listener);
}

/**
* Listener that is notified whenever a node failure / disconnect happens
*/
@FunctionalInterface
public interface HostFailureListener {
/**
* Called once a node disconnect is detected.
* @param node the node that has been disconnected
* @param ex the exception causing the disconnection
*/
void onNodeDisconnected(DiscoveryNode node, Exception ex);
}

// pkg private for testing
TransportClientNodesService getNodesService() {
return nodesService;
}
}

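For illustration only (not part of this change): HostFailureListener is a @FunctionalInterface, so a disconnect callback can be supplied as a lambda through the new protected constructor; the names below are hypothetical.

    // hypothetical wiring of a disconnect callback
    TransportClient.HostFailureListener onDisconnect =
        (node, ex) -> System.err.println("lost connection to node " + node + ": " + ex);
    // a concrete subclass would forward it, e.g.
    //   super(settings, Settings.EMPTY, pluginClasses, onDisconnect);
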
@ -35,8 +35,6 @@ import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Randomness;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
@ -45,6 +43,8 @@ import org.elasticsearch.common.util.concurrent.FutureUtils;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.ConnectTransportException;
|
||||
import org.elasticsearch.transport.FutureTransportResponseHandler;
|
||||
import org.elasticsearch.transport.NodeDisconnectedException;
|
||||
import org.elasticsearch.transport.NodeNotConnectedException;
|
||||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportRequestOptions;
|
||||
import org.elasticsearch.transport.TransportResponseHandler;
|
||||
@ -64,9 +64,7 @@ import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
|
||||
|
||||
public class TransportClientNodesService extends AbstractComponent implements Closeable {
|
||||
final class TransportClientNodesService extends AbstractComponent implements Closeable {
|
||||
|
||||
private final TimeValue nodesSamplerInterval;
|
||||
|
||||
@ -100,37 +98,30 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
|
||||
|
||||
private volatile boolean closed;
|
||||
|
||||
private final TransportClient.HostFailureListener hostFailureListener;
|
||||
|
||||
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL =
|
||||
Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), Property.NodeScope);
|
||||
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT =
|
||||
Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), Property.NodeScope);
|
||||
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME =
|
||||
Setting.boolSetting("client.transport.ignore_cluster_name", false, Property.NodeScope);
|
||||
public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF =
|
||||
Setting.boolSetting("client.transport.sniff", false, Property.NodeScope);
|
||||
|
||||
public TransportClientNodesService(Settings settings,TransportService transportService,
|
||||
ThreadPool threadPool) {
|
||||
TransportClientNodesService(Settings settings, TransportService transportService,
|
||||
ThreadPool threadPool, TransportClient.HostFailureListener hostFailureListener) {
|
||||
super(settings);
|
||||
this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
|
||||
this.transportService = transportService;
|
||||
this.threadPool = threadPool;
|
||||
this.minCompatibilityVersion = Version.CURRENT.minimumCompatibilityVersion();
|
||||
|
||||
this.nodesSamplerInterval = CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(this.settings);
|
||||
this.pingTimeout = CLIENT_TRANSPORT_PING_TIMEOUT.get(this.settings).millis();
|
||||
this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);
|
||||
this.nodesSamplerInterval = TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(this.settings);
|
||||
this.pingTimeout = TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT.get(this.settings).millis();
|
||||
this.ignoreClusterName = TransportClient.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);
|
||||
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("node_sampler_interval[{}]", nodesSamplerInterval);
|
||||
}
|
||||
|
||||
if (CLIENT_TRANSPORT_SNIFF.get(this.settings)) {
|
||||
if (TransportClient.CLIENT_TRANSPORT_SNIFF.get(this.settings)) {
|
||||
this.nodesSampler = new SniffNodesSampler();
|
||||
} else {
|
||||
this.nodesSampler = new SimpleNodeSampler();
|
||||
}
|
||||
this.hostFailureListener = hostFailureListener;
|
||||
this.nodesSamplerFuture = threadPool.schedule(nodesSamplerInterval, ThreadPool.Names.GENERIC, new ScheduledNodeSampler());
|
||||
}
|
||||
|
||||
@ -195,15 +186,25 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
|
||||
if (closed) {
|
||||
throw new IllegalStateException("transport client is closed, can't remove an address");
|
||||
}
|
||||
List<DiscoveryNode> builder = new ArrayList<>();
|
||||
List<DiscoveryNode> listNodesBuilder = new ArrayList<>();
|
||||
for (DiscoveryNode otherNode : listedNodes) {
|
||||
if (!otherNode.getAddress().equals(transportAddress)) {
|
||||
builder.add(otherNode);
|
||||
listNodesBuilder.add(otherNode);
|
||||
} else {
|
||||
logger.debug("removing address [{}]", otherNode);
|
||||
logger.debug("removing address [{}] from listed nodes", otherNode);
|
||||
}
|
||||
}
|
||||
listedNodes = Collections.unmodifiableList(builder);
|
||||
listedNodes = Collections.unmodifiableList(listNodesBuilder);
|
||||
List<DiscoveryNode> nodesBuilder = new ArrayList<>();
|
||||
for (DiscoveryNode otherNode : nodes) {
|
||||
if (!otherNode.getAddress().equals(transportAddress)) {
|
||||
nodesBuilder.add(otherNode);
|
||||
} else {
|
||||
logger.debug("disconnecting from node with address [{}]", otherNode);
|
||||
transportService.disconnectFromNode(otherNode);
|
||||
}
|
||||
}
|
||||
nodes = Collections.unmodifiableList(nodesBuilder);
|
||||
nodesSampler.sample();
|
||||
}
|
||||
return this;
|
||||
@ -224,13 +225,17 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
}
ensureNodesAreAvailable(nodes);
int index = getNodeNumber();
RetryListener<Response> retryListener = new RetryListener<>(callback, listener, nodes, index);
DiscoveryNode node = nodes.get((index) % nodes.size());
RetryListener<Response> retryListener = new RetryListener<>(callback, listener, nodes, index, hostFailureListener);
DiscoveryNode node = retryListener.getNode(0);
try {
callback.doWithNode(node, retryListener);
} catch (Exception e) {
//this exception can't come from the TransportService as it doesn't throw exceptions at all
listener.onFailure(e);
try {
//this exception can't come from the TransportService as it doesn't throw exceptions at all
listener.onFailure(e);
} finally {
retryListener.maybeNodeFailed(node, e);
}
}
}
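
For illustration only (not part of this change): getNode(i), defined later in this class, resolves attempts round-robin over the listed nodes, so with three nodes and a starting index of 4 the retries proceed as:

    getNode(0) -> nodes.get((4 + 0) % 3) == nodes.get(1)   // first attempt
    getNode(1) -> nodes.get((4 + 1) % 3) == nodes.get(2)   // retried on ConnectTransportException
    getNode(2) -> nodes.get((4 + 2) % 3) == nodes.get(0)   // last candidate; afterwards NoNodeAvailableException is reported
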
|
||||
@ -239,15 +244,17 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
|
||||
private final ActionListener<Response> listener;
|
||||
private final List<DiscoveryNode> nodes;
|
||||
private final int index;
|
||||
private final TransportClient.HostFailureListener hostFailureListener;
|
||||
|
||||
private volatile int i;
|
||||
|
||||
public RetryListener(NodeListenerCallback<Response> callback, ActionListener<Response> listener,
|
||||
List<DiscoveryNode> nodes, int index) {
|
||||
List<DiscoveryNode> nodes, int index, TransportClient.HostFailureListener hostFailureListener) {
|
||||
this.callback = callback;
|
||||
this.listener = listener;
|
||||
this.nodes = nodes;
|
||||
this.index = index;
|
||||
this.hostFailureListener = hostFailureListener;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -257,13 +264,15 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) {
|
||||
Throwable throwable = ExceptionsHelper.unwrapCause(e);
|
||||
if (throwable instanceof ConnectTransportException) {
|
||||
maybeNodeFailed(getNode(this.i), (ConnectTransportException) throwable);
|
||||
int i = ++this.i;
|
||||
if (i >= nodes.size()) {
|
||||
listener.onFailure(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e));
|
||||
} else {
|
||||
try {
|
||||
callback.doWithNode(nodes.get((index + i) % nodes.size()), this);
|
||||
callback.doWithNode(getNode(i), this);
|
||||
} catch(final Exception inner) {
|
||||
inner.addSuppressed(e);
|
||||
// this exception can't come from the TransportService as it doesn't throw exceptions at all
|
||||
@ -275,7 +284,15 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
|
||||
}
|
||||
}
|
||||
|
||||
final DiscoveryNode getNode(int i) {
|
||||
return nodes.get((index + i) % nodes.size());
|
||||
}
|
||||
|
||||
final void maybeNodeFailed(DiscoveryNode node, Exception ex) {
|
||||
if (ex instanceof NodeDisconnectedException || ex instanceof NodeNotConnectedException) {
|
||||
hostFailureListener.onNodeDisconnected(node, ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -377,6 +394,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
|
||||
logger.debug(
|
||||
(Supplier<?>)
|
||||
() -> new ParameterizedMessage("failed to connect to node [{}], removed from nodes list", listedNode), e);
|
||||
hostFailureListener.onNodeDisconnected(listedNode, e);
|
||||
newFilteredNodes.add(listedNode);
|
||||
continue;
|
||||
}
|
||||
@ -411,6 +429,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
|
||||
logger.info(
|
||||
(Supplier<?>) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
|
||||
transportService.disconnectFromNode(listedNode);
|
||||
hostFailureListener.onNodeDisconnected(listedNode, e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -489,6 +508,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
|
||||
"failed to get local cluster state for {}, disconnecting...", listedNode), e);
|
||||
transportService.disconnectFromNode(listedNode);
|
||||
latch.countDown();
|
||||
hostFailureListener.onNodeDisconnected(listedNode, e);
|
||||
}
|
||||
});
|
||||
} catch (Exception e) {
|
||||
@ -497,6 +517,7 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
|
||||
"failed to get local cluster state info for {}, disconnecting...", listedNode), e);
|
||||
transportService.disconnectFromNode(listedNode);
|
||||
latch.countDown();
|
||||
hostFailureListener.onNodeDisconnected(listedNode, e);
|
||||
}
|
||||
}
|
||||
});
|
||||
@ -531,4 +552,9 @@ public class TransportClientNodesService extends AbstractComponent implements Cl
|
||||
|
||||
void doWithNode(DiscoveryNode node, ActionListener<Response> listener);
|
||||
}
|
||||
|
||||
// pkg private for testing
|
||||
void doSample() {
|
||||
nodesSampler.doSample();
|
||||
}
|
||||
}
|
||||
|
@ -17,7 +17,7 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.transport.support;
|
||||
package org.elasticsearch.client.transport;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
@ -26,9 +26,6 @@ import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.action.GenericAction;
|
||||
import org.elasticsearch.action.TransportActionNodeProxy;
|
||||
import org.elasticsearch.client.transport.TransportClientNodesService;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
@ -39,12 +36,12 @@ import java.util.Map;
|
||||
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
|
||||
public class TransportProxyClient {
|
||||
final class TransportProxyClient {
|
||||
|
||||
private final TransportClientNodesService nodesService;
|
||||
private final Map<Action, TransportActionNodeProxy> proxies;
|
||||
|
||||
public TransportProxyClient(Settings settings, TransportService transportService,
|
||||
TransportProxyClient(Settings settings, TransportService transportService,
|
||||
TransportClientNodesService nodesService, List<GenericAction> actions) {
|
||||
this.nodesService = nodesService;
|
||||
Map<Action, TransportActionNodeProxy> proxies = new HashMap<>();
|
||||
@ -56,7 +53,9 @@ public class TransportProxyClient {
|
||||
this.proxies = unmodifiableMap(proxies);
|
||||
}
|
||||
|
||||
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final Action<Request, Response, RequestBuilder> action, final Request request, ActionListener<Response> listener) {
|
||||
public <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends
|
||||
ActionRequestBuilder<Request, Response, RequestBuilder>> void execute(final Action<Request, Response, RequestBuilder> action,
|
||||
final Request request, ActionListener<Response> listener) {
|
||||
final TransportActionNodeProxy<Request, Response> proxy = proxies.get(action);
|
||||
nodesService.execute((n, l) -> proxy.execute(n, request, l), listener);
|
||||
}
|
@ -143,10 +143,12 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
||||
private volatile RoutingNodes routingNodes;
|
||||
|
||||
public ClusterState(long version, String stateUUID, ClusterState state) {
|
||||
this(state.clusterName, version, stateUUID, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(), false);
|
||||
this(state.clusterName, version, stateUUID, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(),
|
||||
false);
|
||||
}
|
||||
|
||||
public ClusterState(ClusterName clusterName, long version, String stateUUID, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap<String, Custom> customs, boolean wasReadFromDiff) {
|
||||
public ClusterState(ClusterName clusterName, long version, String stateUUID, MetaData metaData, RoutingTable routingTable,
|
||||
DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap<String, Custom> customs, boolean wasReadFromDiff) {
|
||||
this.version = version;
|
||||
this.stateUUID = stateUUID;
|
||||
this.clusterName = clusterName;
|
||||
@ -272,12 +274,14 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
||||
}
|
||||
|
||||
/**
* a cluster state supersedes another state iff they are from the same master and the version of this state is higher than the other state.
* a cluster state supersedes another state iff they are from the same master and the version of this state is higher than the other
* state.
* <p>
* In essence that means that all the changes from the other cluster state are also reflected by the current one
*/
public boolean supersedes(ClusterState other) {
|
||||
return this.nodes().getMasterNodeId() != null && this.nodes().getMasterNodeId().equals(other.nodes().getMasterNodeId()) && this.version() > other.version();
|
||||
return this.nodes().getMasterNodeId() != null && this.nodes().getMasterNodeId().equals(other.nodes().getMasterNodeId())
|
||||
&& this.version() > other.version();
|
||||
|
||||
}
|
||||
|
||||
|
@ -21,7 +21,6 @@ package org.elasticsearch.cluster;
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
public interface ClusterStateTaskExecutor<T> {
|
||||
/**
|
||||
@ -149,18 +148,5 @@ public interface ClusterStateTaskExecutor<T> {
|
||||
assert !isSuccess();
|
||||
return failure;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle the execution result with the provided consumers
|
||||
* @param onSuccess handler to invoke on success
|
||||
* @param onFailure handler to invoke on failure; the throwable passed through will not be null
|
||||
*/
|
||||
public void handle(Runnable onSuccess, Consumer<Exception> onFailure) {
|
||||
if (failure == null) {
|
||||
onSuccess.run();
|
||||
} else {
|
||||
onFailure.accept(failure);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -208,7 +208,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
|
||||
public IndexTemplateMetaData readFrom(StreamInput in) throws IOException {
|
||||
Builder builder = new Builder(in.readString());
|
||||
builder.order(in.readInt());
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
builder.patterns(in.readList(StreamInput::readString));
|
||||
} else {
|
||||
builder.patterns(Collections.singletonList(in.readString()));
|
||||
@ -239,7 +239,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(name);
|
||||
out.writeInt(order);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
out.writeStringList(patterns);
|
||||
} else {
|
||||
out.writeString(patterns.size() > 0 ? patterns.get(0) : "");
|
||||
|
@ -73,32 +73,6 @@ public class PlainShardsIterator implements ShardsIterator {
|
||||
return count;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int assignedReplicasIncludingRelocating() {
|
||||
int count = 0;
|
||||
for (ShardRouting shard : shards) {
|
||||
if (shard.unassigned()) {
|
||||
continue;
|
||||
}
|
||||
// if the shard is primary and relocating, add one to the counter since we perform it on the replica as well
|
||||
// (and we already did it on the primary)
|
||||
if (shard.primary()) {
|
||||
if (shard.relocating()) {
|
||||
count++;
|
||||
}
|
||||
} else {
|
||||
count++;
|
||||
// if we are relocating the replica, we want to perform the index operation on both the relocating
|
||||
// shard and the target shard. This means that we won't loose index operations between end of recovery
|
||||
// and reassignment of the shard by the master node
|
||||
if (shard.relocating()) {
|
||||
count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterable<ShardRouting> asUnordered() {
|
||||
return shards;
|
||||
|
@ -42,13 +42,6 @@ public interface ShardsIterator {
|
||||
*/
|
||||
int sizeActive();
|
||||
|
||||
/**
|
||||
* Returns the number of replicas in this iterator that are not in the
|
||||
* {@link ShardRoutingState#UNASSIGNED}. The returned double-counts replicas
|
||||
* that are in the state {@link ShardRoutingState#RELOCATING}
|
||||
*/
|
||||
int assignedReplicasIncludingRelocating();
|
||||
|
||||
/**
|
||||
* Returns the next shard, or <tt>null</tt> if none available.
|
||||
*/
|
||||
|
@ -30,6 +30,7 @@ import org.elasticsearch.cluster.ClusterState.Builder;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskConfig;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskExecutor.BatchResult;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskListener;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.LocalNodeMasterListener;
|
||||
@ -107,7 +108,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
|
||||
private TimeValue slowTaskLoggingThreshold;
|
||||
|
||||
private volatile PrioritizedEsThreadPoolExecutor updateTasksExecutor;
|
||||
private volatile PrioritizedEsThreadPoolExecutor threadPoolExecutor;
|
||||
|
||||
/**
|
||||
* Those 3 state listeners are changing infrequently - CopyOnWriteArrayList is just fine
|
||||
@ -240,7 +241,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
updateState(css -> new ClusterServiceState(
|
||||
ClusterState.builder(css.getClusterState()).blocks(initialBlocks).build(),
|
||||
css.getClusterStateStatus()));
|
||||
this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME),
|
||||
this.threadPoolExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME),
|
||||
threadPool.getThreadContext());
|
||||
}
|
||||
|
||||
@ -255,12 +256,12 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
logger.debug("failed to notify listeners on shutdown", ex);
|
||||
}
|
||||
}
|
||||
ThreadPool.terminate(updateTasksExecutor, 10, TimeUnit.SECONDS);
|
||||
ThreadPool.terminate(threadPoolExecutor, 10, TimeUnit.SECONDS);
|
||||
// close timeout listeners that did not have an ongoing timeout
|
||||
postAppliedListeners
|
||||
.stream()
|
||||
.filter(listener -> listener instanceof TimeoutClusterStateListener)
|
||||
.map(listener -> (TimeoutClusterStateListener)listener)
|
||||
.map(listener -> (TimeoutClusterStateListener) listener)
|
||||
.forEach(TimeoutClusterStateListener::onClose);
|
||||
remove(localNodeMasterListeners);
|
||||
}
|
||||
@ -364,7 +365,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
}
|
||||
// call the post added notification on the same event thread
|
||||
try {
|
||||
updateTasksExecutor.execute(new SourcePrioritizedRunnable(Priority.HIGH, "_add_listener_") {
|
||||
threadPoolExecutor.execute(new SourcePrioritizedRunnable(Priority.HIGH, "_add_listener_") {
|
||||
@Override
|
||||
public void run() {
|
||||
if (timeout != null) {
|
||||
@ -448,54 +449,33 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
@SuppressWarnings("unchecked")
|
||||
ClusterStateTaskExecutor<Object> taskExecutor = (ClusterStateTaskExecutor<Object>) executor;
|
||||
// convert to an identity map to check for dups based on update tasks semantics of using identity instead of equal
|
||||
final IdentityHashMap<T, ClusterStateTaskListener> tasksIdentity = new IdentityHashMap<>(tasks);
|
||||
final List<UpdateTask<T>> updateTasks = tasksIdentity.entrySet().stream().map(
|
||||
entry -> new UpdateTask<>(source, entry.getKey(), config.priority(), executor, safe(entry.getValue(), logger))
|
||||
final IdentityHashMap<Object, ClusterStateTaskListener> tasksIdentity = new IdentityHashMap<>(tasks);
|
||||
final List<UpdateTask> updateTasks = tasksIdentity.entrySet().stream().map(
|
||||
entry -> new UpdateTask(source, entry.getKey(), config.priority(), taskExecutor, safe(entry.getValue(), logger))
|
||||
).collect(Collectors.toList());
|
||||
|
||||
synchronized (updateTasksPerExecutor) {
|
||||
LinkedHashSet<UpdateTask> existingTasks = updateTasksPerExecutor.computeIfAbsent(executor,
|
||||
k -> new LinkedHashSet<>(updateTasks.size()));
|
||||
for (@SuppressWarnings("unchecked") UpdateTask<T> existing : existingTasks) {
|
||||
for (UpdateTask existing : existingTasks) {
|
||||
if (tasksIdentity.containsKey(existing.task)) {
|
||||
throw new IllegalStateException("task [" + executor.describeTasks(Collections.singletonList(existing.task)) +
|
||||
throw new IllegalStateException("task [" + taskExecutor.describeTasks(Collections.singletonList(existing.task)) +
|
||||
"] with source [" + source + "] is already queued");
|
||||
}
|
||||
}
|
||||
existingTasks.addAll(updateTasks);
|
||||
}
|
||||
|
||||
final UpdateTask<T> firstTask = updateTasks.get(0);
|
||||
final UpdateTask firstTask = updateTasks.get(0);
|
||||
|
||||
final TimeValue timeout = config.timeout();
|
||||
if (timeout != null) {
|
||||
updateTasksExecutor.execute(firstTask, threadPool.scheduler(), timeout, () -> threadPool.generic().execute(() -> {
|
||||
final ArrayList<UpdateTask<T>> toRemove = new ArrayList<>();
|
||||
for (UpdateTask<T> task : updateTasks) {
|
||||
if (task.processed.getAndSet(true) == false) {
|
||||
logger.debug("cluster state update task [{}] timed out after [{}]", source, timeout);
|
||||
toRemove.add(task);
|
||||
}
|
||||
}
|
||||
if (toRemove.isEmpty() == false) {
|
||||
ClusterStateTaskExecutor<T> clusterStateTaskExecutor = toRemove.get(0).executor;
|
||||
synchronized (updateTasksPerExecutor) {
|
||||
LinkedHashSet<UpdateTask> existingTasks = updateTasksPerExecutor.get(clusterStateTaskExecutor);
|
||||
if (existingTasks != null) {
|
||||
existingTasks.removeAll(toRemove);
|
||||
if (existingTasks.isEmpty()) {
|
||||
updateTasksPerExecutor.remove(clusterStateTaskExecutor);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (UpdateTask<T> task : toRemove) {
|
||||
task.listener.onFailure(source, new ProcessClusterEventTimeoutException(timeout, source));
|
||||
}
|
||||
}
|
||||
}));
|
||||
threadPoolExecutor.execute(firstTask, threadPool.scheduler(), timeout, () -> onTimeout(updateTasks, source, timeout));
|
||||
} else {
|
||||
updateTasksExecutor.execute(firstTask);
|
||||
threadPoolExecutor.execute(firstTask);
|
||||
}
|
||||
} catch (EsRejectedExecutionException e) {
|
||||
// ignore cases where we are shutting down..., there is really nothing interesting
|
||||
@ -506,11 +486,38 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
}
|
||||
}
|
||||
|
||||
private void onTimeout(List<UpdateTask> updateTasks, String source, TimeValue timeout) {
|
||||
threadPool.generic().execute(() -> {
|
||||
final ArrayList<UpdateTask> toRemove = new ArrayList<>();
|
||||
for (UpdateTask task : updateTasks) {
|
||||
if (task.processed.getAndSet(true) == false) {
|
||||
logger.debug("cluster state update task [{}] timed out after [{}]", source, timeout);
|
||||
toRemove.add(task);
|
||||
}
|
||||
}
|
||||
if (toRemove.isEmpty() == false) {
|
||||
ClusterStateTaskExecutor<Object> clusterStateTaskExecutor = toRemove.get(0).executor;
|
||||
synchronized (updateTasksPerExecutor) {
|
||||
LinkedHashSet<UpdateTask> existingTasks = updateTasksPerExecutor.get(clusterStateTaskExecutor);
|
||||
if (existingTasks != null) {
|
||||
existingTasks.removeAll(toRemove);
|
||||
if (existingTasks.isEmpty()) {
|
||||
updateTasksPerExecutor.remove(clusterStateTaskExecutor);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (UpdateTask task : toRemove) {
|
||||
task.listener.onFailure(source, new ProcessClusterEventTimeoutException(timeout, source));
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the tasks that are pending.
|
||||
*/
|
||||
public List<PendingClusterTask> pendingTasks() {
|
||||
PrioritizedEsThreadPoolExecutor.Pending[] pendings = updateTasksExecutor.getPending();
|
||||
PrioritizedEsThreadPoolExecutor.Pending[] pendings = threadPoolExecutor.getPending();
|
||||
List<PendingClusterTask> pendingClusterTasks = new ArrayList<>(pendings.length);
|
||||
for (PrioritizedEsThreadPoolExecutor.Pending pending : pendings) {
|
||||
final String source;
|
||||
@ -539,7 +546,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
* Returns the number of currently pending tasks.
|
||||
*/
|
||||
public int numberOfPendingTasks() {
|
||||
return updateTasksExecutor.getNumberOfPendingTasks();
|
||||
return threadPoolExecutor.getNumberOfPendingTasks();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -548,7 +555,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
* @return A zero time value if the queue is empty, otherwise the time value oldest task waiting in the queue
|
||||
*/
|
||||
public TimeValue getMaxTaskWaitTime() {
|
||||
return updateTasksExecutor.getMaxTaskWaitTime();
|
||||
return threadPoolExecutor.getMaxTaskWaitTime();
|
||||
}
|
||||
|
||||
/** asserts that the current thread is the cluster state update thread */
|
||||
@ -582,47 +589,90 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
}
|
||||
}
|
||||
|
||||
<T> void runTasksForExecutor(ClusterStateTaskExecutor<T> executor) {
|
||||
final ArrayList<UpdateTask<T>> toExecute = new ArrayList<>();
|
||||
final Map<String, ArrayList<T>> processTasksBySource = new HashMap<>();
|
||||
synchronized (updateTasksPerExecutor) {
|
||||
LinkedHashSet<UpdateTask> pending = updateTasksPerExecutor.remove(executor);
|
||||
if (pending != null) {
|
||||
for (UpdateTask<T> task : pending) {
|
||||
if (task.processed.getAndSet(true) == false) {
|
||||
logger.trace("will process {}", task);
|
||||
toExecute.add(task);
|
||||
processTasksBySource.computeIfAbsent(task.source, s -> new ArrayList<>()).add(task.task);
|
||||
} else {
|
||||
logger.trace("skipping {}, already processed", task);
|
||||
}
|
||||
}
|
||||
void runTasks(TaskInputs taskInputs) {
|
||||
if (!lifecycle.started()) {
|
||||
logger.debug("processing [{}]: ignoring, cluster service not started", taskInputs.summary);
|
||||
return;
|
||||
}
|
||||
|
||||
logger.debug("processing [{}]: execute", taskInputs.summary);
|
||||
ClusterServiceState previousClusterServiceState = clusterServiceState();
|
||||
ClusterState previousClusterState = previousClusterServiceState.getClusterState();
|
||||
|
||||
if (!previousClusterState.nodes().isLocalNodeElectedMaster() && taskInputs.runOnlyOnMaster()) {
|
||||
logger.debug("failing [{}]: local node is no longer master", taskInputs.summary);
|
||||
taskInputs.onNoLongerMaster();
|
||||
return;
|
||||
}
|
||||
|
||||
long startTimeNS = currentTimeInNanos();
|
||||
TaskOutputs taskOutputs = calculateTaskOutputs(taskInputs, previousClusterServiceState, startTimeNS);
|
||||
taskOutputs.notifyFailedTasks();
|
||||
|
||||
if (taskOutputs.clusterStateUnchanged()) {
|
||||
taskOutputs.notifySuccessfulTasksOnUnchangedClusterState();
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took [{}] no change in cluster_state", taskInputs.summary, executionTime);
|
||||
warnAboutSlowTaskIfNeeded(executionTime, taskInputs.summary);
|
||||
} else {
|
||||
ClusterState newClusterState = taskOutputs.newClusterServiceState.getClusterState();
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("cluster state updated, source [{}]\n{}", taskInputs.summary, newClusterState);
|
||||
} else if (logger.isDebugEnabled()) {
|
||||
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), taskInputs.summary);
|
||||
}
|
||||
try {
|
||||
publishAndApplyChanges(taskInputs, taskOutputs);
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took [{}] done applying updated cluster_state (version: {}, uuid: {})", taskInputs.summary,
|
||||
executionTime, newClusterState.version(), newClusterState.stateUUID());
|
||||
warnAboutSlowTaskIfNeeded(executionTime, taskInputs.summary);
|
||||
} catch (Exception e) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
|
||||
final long version = newClusterState.version();
|
||||
final String stateUUID = newClusterState.stateUUID();
|
||||
final String fullState = newClusterState.toString();
|
||||
logger.warn(
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
|
||||
executionTime,
|
||||
version,
|
||||
stateUUID,
|
||||
taskInputs.summary,
|
||||
fullState),
|
||||
e);
|
||||
// TODO: do we want to call updateTask.onFailure here?
|
||||
}
|
||||
}
|
||||
if (toExecute.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
final String tasksSummary = processTasksBySource.entrySet().stream().map(entry -> {
|
||||
String tasks = executor.describeTasks(entry.getValue());
|
||||
return tasks.isEmpty() ? entry.getKey() : entry.getKey() + "[" + tasks + "]";
|
||||
}).reduce((s1, s2) -> s1 + ", " + s2).orElse("");
|
||||
}
|
||||
|
||||
if (!lifecycle.started()) {
|
||||
logger.debug("processing [{}]: ignoring, cluster_service not started", tasksSummary);
|
||||
return;
|
||||
public TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterServiceState previousClusterServiceState, long startTimeNS) {
|
||||
ClusterState previousClusterState = previousClusterServiceState.getClusterState();
|
||||
BatchResult<Object> batchResult = executeTasks(taskInputs, startTimeNS, previousClusterState);
|
||||
ClusterState newClusterState = batchResult.resultingState;
|
||||
// extract those that are waiting for results
|
||||
List<UpdateTask> nonFailedTasks = new ArrayList<>();
|
||||
for (UpdateTask updateTask : taskInputs.updateTasks) {
|
||||
assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask;
|
||||
final ClusterStateTaskExecutor.TaskResult taskResult =
|
||||
batchResult.executionResults.get(updateTask.task);
|
||||
if (taskResult.isSuccess()) {
|
||||
nonFailedTasks.add(updateTask);
|
||||
}
|
||||
}
|
||||
logger.debug("processing [{}]: execute", tasksSummary);
|
||||
ClusterState previousClusterState = clusterServiceState().getClusterState();
|
||||
if (!previousClusterState.nodes().isLocalNodeElectedMaster() && executor.runOnlyOnMaster()) {
|
||||
logger.debug("failing [{}]: local node is no longer master", tasksSummary);
|
||||
toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source));
|
||||
return;
|
||||
}
|
||||
ClusterStateTaskExecutor.BatchResult<T> batchResult;
|
||||
long startTimeNS = currentTimeInNanos();
|
||||
newClusterState = patchVersions(previousClusterState, newClusterState);
|
||||
|
||||
ClusterServiceState newClusterServiceState = new ClusterServiceState(newClusterState, ClusterStateStatus.BEING_APPLIED);
|
||||
|
||||
return new TaskOutputs(taskInputs, previousClusterServiceState, newClusterServiceState, nonFailedTasks,
|
||||
batchResult.executionResults);
|
||||
}
|
||||
|
||||
private BatchResult<Object> executeTasks(TaskInputs taskInputs, long startTimeNS, ClusterState previousClusterState) {
|
||||
BatchResult<Object> batchResult;
|
||||
try {
|
||||
List<T> inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList());
|
||||
batchResult = executor.execute(previousClusterState, inputs);
|
||||
List<Object> inputs = taskInputs.updateTasks.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList());
|
||||
batchResult = taskInputs.executor.execute(previousClusterState, inputs);
|
||||
} catch (Exception e) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
|
||||
if (logger.isTraceEnabled()) {
|
||||
@ -631,65 +681,36 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
"failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
|
||||
executionTime,
|
||||
previousClusterState.version(),
|
||||
tasksSummary,
|
||||
taskInputs.summary,
|
||||
previousClusterState.nodes(),
|
||||
previousClusterState.routingTable(),
|
||||
previousClusterState.getRoutingNodes()),
|
||||
e);
|
||||
}
|
||||
warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
|
||||
batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder()
|
||||
.failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e)
|
||||
.build(previousClusterState);
|
||||
warnAboutSlowTaskIfNeeded(executionTime, taskInputs.summary);
|
||||
batchResult = BatchResult.builder()
|
||||
.failures(taskInputs.updateTasks.stream().map(updateTask -> updateTask.task)::iterator, e)
|
||||
.build(previousClusterState);
|
||||
}
|
||||
|
||||
assert batchResult.executionResults != null;
|
||||
assert batchResult.executionResults.size() == toExecute.size()
|
||||
: String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(),
|
||||
toExecute.size() == 1 ? "" : "s", batchResult.executionResults.size());
|
||||
assert batchResult.executionResults.size() == taskInputs.updateTasks.size()
|
||||
: String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", taskInputs.updateTasks.size(),
|
||||
taskInputs.updateTasks.size() == 1 ? "" : "s", batchResult.executionResults.size());
|
||||
boolean assertsEnabled = false;
|
||||
assert (assertsEnabled = true);
|
||||
if (assertsEnabled) {
|
||||
for (UpdateTask<T> updateTask : toExecute) {
|
||||
for (UpdateTask updateTask : taskInputs.updateTasks) {
|
||||
assert batchResult.executionResults.containsKey(updateTask.task) :
|
||||
"missing task result for " + updateTask;
|
||||
}
|
||||
}
|
||||
|
||||
ClusterState newClusterState = batchResult.resultingState;
|
||||
final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>();
|
||||
// fail all tasks that have failed and extract those that are waiting for results
|
||||
for (UpdateTask<T> updateTask : toExecute) {
|
||||
assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask;
|
||||
final ClusterStateTaskExecutor.TaskResult executionResult =
|
||||
batchResult.executionResults.get(updateTask.task);
|
||||
executionResult.handle(
|
||||
() -> proccessedListeners.add(updateTask),
|
||||
ex -> {
|
||||
logger.debug(
|
||||
(Supplier<?>)
|
||||
() -> new ParameterizedMessage("cluster state update task {} failed", updateTask), ex);
|
||||
updateTask.listener.onFailure(updateTask.source, ex);
|
||||
}
|
||||
);
|
||||
}
|
||||
return batchResult;
|
||||
}
|
||||
|
||||
if (previousClusterState == newClusterState) {
|
||||
for (UpdateTask<T> task : proccessedListeners) {
|
||||
if (task.listener instanceof AckedClusterStateTaskListener) {
|
||||
//no need to wait for ack if nothing changed, the update can be counted as acknowledged
|
||||
((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null);
|
||||
}
|
||||
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
|
||||
}
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took [{}] no change in cluster_state", tasksSummary, executionTime);
|
||||
warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
ArrayList<Discovery.AckListener> ackListeners = new ArrayList<>();
|
||||
private ClusterState patchVersions(ClusterState previousClusterState, ClusterState newClusterState) {
|
||||
if (previousClusterState != newClusterState) {
|
||||
if (newClusterState.nodes().isLocalNodeElectedMaster()) {
|
||||
// only the master controls the version numbers
|
||||
Builder builder = ClusterState.builder(newClusterState).incrementVersion();
|
||||
@ -701,152 +722,221 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
|
||||
}
|
||||
newClusterState = builder.build();
|
||||
for (UpdateTask<T> task : proccessedListeners) {
|
||||
if (task.listener instanceof AckedClusterStateTaskListener) {
|
||||
final AckedClusterStateTaskListener ackedListener = (AckedClusterStateTaskListener) task.listener;
|
||||
if (ackedListener.ackTimeout() == null || ackedListener.ackTimeout().millis() == 0) {
|
||||
ackedListener.onAckTimeout();
|
||||
} else {
|
||||
try {
|
||||
ackListeners.add(new AckCountDownListener(ackedListener, newClusterState.version(), newClusterState.nodes(),
|
||||
threadPool));
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex);
|
||||
}
|
||||
//timeout straightaway, otherwise we could wait forever as the timeout thread has not started
|
||||
ackedListener.onAckTimeout();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
final Discovery.AckListener ackListener = new DelegetingAckListener(ackListeners);
|
||||
}
|
||||
return newClusterState;
|
||||
}
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("cluster state updated, source [{}]\n{}", tasksSummary, newClusterState);
|
||||
} else if (logger.isDebugEnabled()) {
|
||||
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), tasksSummary);
|
||||
private void publishAndApplyChanges(TaskInputs taskInputs, TaskOutputs taskOutputs) {
|
||||
ClusterState previousClusterState = taskOutputs.previousClusterServiceState.getClusterState();
|
||||
ClusterState newClusterState = taskOutputs.newClusterServiceState.getClusterState();
|
||||
|
||||
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(taskInputs.summary, newClusterState, previousClusterState);
|
||||
// new cluster state, notify all listeners
|
||||
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
|
||||
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
|
||||
String summary = nodesDelta.shortSummary();
|
||||
if (summary.length() > 0) {
|
||||
logger.info("{}, reason: {}", summary, taskInputs.summary);
|
||||
}
|
||||
|
||||
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(tasksSummary, newClusterState, previousClusterState);
|
||||
// new cluster state, notify all listeners
|
||||
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
|
||||
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
|
||||
String summary = nodesDelta.shortSummary();
|
||||
if (summary.length() > 0) {
|
||||
logger.info("{}, reason: {}", summary, tasksSummary);
|
||||
}
|
||||
}
|
||||
|
||||
nodeConnectionsService.connectToNodes(clusterChangedEvent.nodesDelta().addedNodes());
|
||||
|
||||
// if we are the master, publish the new state to all nodes
|
||||
// we publish here before we send a notification to all the listeners, since if it fails
|
||||
// we don't want to notify
|
||||
if (newClusterState.nodes().isLocalNodeElectedMaster()) {
|
||||
logger.debug("publishing cluster state version [{}]", newClusterState.version());
|
||||
try {
|
||||
clusterStatePublisher.accept(clusterChangedEvent, ackListener);
|
||||
} catch (Discovery.FailedToCommitClusterStateException t) {
|
||||
final long version = newClusterState.version();
|
||||
logger.warn(
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"failing [{}]: failed to commit cluster state version [{}]", tasksSummary, version),
|
||||
t);
|
||||
// ensure that list of connected nodes in NodeConnectionsService is in-sync with the nodes of the current cluster state
|
||||
nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().addedNodes());
|
||||
proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// update the current cluster state
|
||||
ClusterState finalNewClusterState = newClusterState;
|
||||
updateState(css -> new ClusterServiceState(finalNewClusterState, ClusterStateStatus.BEING_APPLIED));
|
||||
logger.debug("set local cluster state to version {}", newClusterState.version());
|
||||
try {
|
||||
// nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency
|
||||
if (clusterChangedEvent.state().blocks().disableStatePersistence() == false && clusterChangedEvent.metaDataChanged()) {
|
||||
final Settings incomingSettings = clusterChangedEvent.state().metaData().settings();
|
||||
clusterSettings.applySettings(incomingSettings);
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to apply cluster settings", ex);
|
||||
}
|
||||
for (ClusterStateListener listener : preAppliedListeners) {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version());
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes());
|
||||
|
||||
updateState(css -> new ClusterServiceState(css.getClusterState(), ClusterStateStatus.APPLIED));
|
||||
|
||||
for (ClusterStateListener listener : postAppliedListeners) {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version());
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
//manual ack only from the master at the end of the publish
|
||||
if (newClusterState.nodes().isLocalNodeElectedMaster()) {
|
||||
try {
|
||||
ackListener.onNodeAck(newClusterState.nodes().getLocalNode(), null);
|
||||
} catch (Exception e) {
|
||||
final DiscoveryNode localNode = newClusterState.nodes().getLocalNode();
|
||||
logger.debug(
|
||||
(Supplier<?>) () -> new ParameterizedMessage("error while processing ack for master node [{}]", localNode),
|
||||
e);
|
||||
}
|
||||
}
|
||||
|
||||
for (UpdateTask<T> task : proccessedListeners) {
|
||||
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
|
||||
}
|
||||
|
||||
try {
|
||||
executor.clusterStatePublished(clusterChangedEvent);
|
||||
} catch (Exception e) {
|
||||
logger.error(
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"exception thrown while notifying executor of new cluster state publication [{}]",
|
||||
tasksSummary),
|
||||
e);
|
||||
}
|
||||
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took [{}] done applying updated cluster_state (version: {}, uuid: {})", tasksSummary,
|
||||
executionTime, newClusterState.version(), newClusterState.stateUUID());
|
||||
warnAboutSlowTaskIfNeeded(executionTime, tasksSummary);
|
||||
} catch (Exception e) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
|
||||
final long version = newClusterState.version();
|
||||
final String stateUUID = newClusterState.stateUUID();
|
||||
final String fullState = newClusterState.toString();
|
||||
logger.warn(
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
|
||||
executionTime,
|
||||
version,
|
||||
stateUUID,
|
||||
tasksSummary,
|
||||
fullState),
|
||||
e);
|
||||
// TODO: do we want to call updateTask.onFailure here?
|
||||
}
|
||||
|
||||
final Discovery.AckListener ackListener = newClusterState.nodes().isLocalNodeElectedMaster() ?
|
||||
taskOutputs.createAckListener(threadPool, newClusterState) :
|
||||
null;
|
||||
|
||||
nodeConnectionsService.connectToNodes(clusterChangedEvent.nodesDelta().addedNodes());
|
||||
|
||||
// if we are the master, publish the new state to all nodes
|
||||
// we publish here before we send a notification to all the listeners, since if it fails
|
||||
// we don't want to notify
|
||||
if (newClusterState.nodes().isLocalNodeElectedMaster()) {
|
||||
logger.debug("publishing cluster state version [{}]", newClusterState.version());
|
||||
try {
|
||||
clusterStatePublisher.accept(clusterChangedEvent, ackListener);
|
||||
} catch (Discovery.FailedToCommitClusterStateException t) {
|
||||
final long version = newClusterState.version();
|
||||
logger.warn(
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"failing [{}]: failed to commit cluster state version [{}]", taskInputs.summary, version),
|
||||
t);
|
||||
// ensure that list of connected nodes in NodeConnectionsService is in-sync with the nodes of the current cluster state
|
||||
nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().addedNodes());
|
||||
taskOutputs.publishingFailed(t);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// update the current cluster state
|
||||
updateState(css -> taskOutputs.newClusterServiceState);
|
||||
logger.debug("set local cluster state to version {}", newClusterState.version());
|
||||
try {
|
||||
// nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency
|
||||
if (clusterChangedEvent.state().blocks().disableStatePersistence() == false && clusterChangedEvent.metaDataChanged()) {
|
||||
final Settings incomingSettings = clusterChangedEvent.state().metaData().settings();
|
||||
clusterSettings.applySettings(incomingSettings);
|
||||
}
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to apply cluster settings", ex);
|
||||
}
|
||||
for (ClusterStateListener listener : preAppliedListeners) {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version());
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes());
|
||||
|
||||
updateState(css -> new ClusterServiceState(css.getClusterState(), ClusterStateStatus.APPLIED));
|
||||
|
||||
for (ClusterStateListener listener : postAppliedListeners) {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version());
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
//manual ack only from the master at the end of the publish
|
||||
if (newClusterState.nodes().isLocalNodeElectedMaster()) {
|
||||
try {
|
||||
ackListener.onNodeAck(newClusterState.nodes().getLocalNode(), null);
|
||||
} catch (Exception e) {
|
||||
final DiscoveryNode localNode = newClusterState.nodes().getLocalNode();
|
||||
logger.debug(
|
||||
(Supplier<?>) () -> new ParameterizedMessage("error while processing ack for master node [{}]", localNode),
|
||||
e);
|
||||
}
|
||||
}
|
||||
|
||||
taskOutputs.processedDifferentClusterState(previousClusterState, newClusterState);
|
||||
|
||||
try {
|
||||
taskOutputs.clusterStatePublished(clusterChangedEvent);
|
||||
} catch (Exception e) {
|
||||
logger.error(
|
||||
(Supplier<?>) () -> new ParameterizedMessage(
|
||||
"exception thrown while notifying executor of new cluster state publication [{}]",
|
||||
taskInputs.summary),
|
||||
e);
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * Represents a set of tasks to be processed together with their executor
     */
    class TaskInputs {
        public final String summary;
        public final ArrayList<UpdateTask> updateTasks;
        public final ClusterStateTaskExecutor<Object> executor;

        TaskInputs(ClusterStateTaskExecutor<Object> executor, ArrayList<UpdateTask> updateTasks, String summary) {
            this.summary = summary;
            this.executor = executor;
            this.updateTasks = updateTasks;
        }

        public boolean runOnlyOnMaster() {
            return executor.runOnlyOnMaster();
        }

        public void onNoLongerMaster() {
            updateTasks.stream().forEach(task -> task.listener.onNoLongerMaster(task.source));
        }
    }

/**
|
||||
* Output created by executing a set of tasks provided as TaskInputs
|
||||
*/
|
||||
class TaskOutputs {
|
||||
public final TaskInputs taskInputs;
|
||||
public final ClusterServiceState previousClusterServiceState;
|
||||
public final ClusterServiceState newClusterServiceState;
|
||||
public final List<UpdateTask> nonFailedTasks;
|
||||
public final Map<Object, ClusterStateTaskExecutor.TaskResult> executionResults;
|
||||
|
||||
public TaskOutputs(TaskInputs taskInputs, ClusterServiceState previousClusterServiceState,
|
||||
ClusterServiceState newClusterServiceState, List<UpdateTask> nonFailedTasks,
|
||||
Map<Object, ClusterStateTaskExecutor.TaskResult> executionResults) {
|
||||
this.taskInputs = taskInputs;
|
||||
this.previousClusterServiceState = previousClusterServiceState;
|
||||
this.newClusterServiceState = newClusterServiceState;
|
||||
this.nonFailedTasks = nonFailedTasks;
|
||||
this.executionResults = executionResults;
|
||||
}
|
||||
|
||||
public void publishingFailed(Discovery.FailedToCommitClusterStateException t) {
|
||||
nonFailedTasks.forEach(task -> task.listener.onFailure(task.source, t));
|
||||
}
|
||||
|
||||
public void processedDifferentClusterState(ClusterState previousClusterState, ClusterState newClusterState) {
|
||||
nonFailedTasks.forEach(task -> task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState));
|
||||
}
|
||||
|
||||
public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
|
||||
taskInputs.executor.clusterStatePublished(clusterChangedEvent);
|
||||
}
|
||||
|
||||
public Discovery.AckListener createAckListener(ThreadPool threadPool, ClusterState newClusterState) {
|
||||
ArrayList<Discovery.AckListener> ackListeners = new ArrayList<>();
|
||||
|
||||
//timeout straightaway, otherwise we could wait forever as the timeout thread has not started
|
||||
nonFailedTasks.stream().filter(task -> task.listener instanceof AckedClusterStateTaskListener).forEach(task -> {
|
||||
final AckedClusterStateTaskListener ackedListener = (AckedClusterStateTaskListener) task.listener;
|
||||
if (ackedListener.ackTimeout() == null || ackedListener.ackTimeout().millis() == 0) {
|
||||
ackedListener.onAckTimeout();
|
||||
} else {
|
||||
try {
|
||||
ackListeners.add(new AckCountDownListener(ackedListener, newClusterState.version(), newClusterState.nodes(),
|
||||
threadPool));
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex);
|
||||
}
|
||||
//timeout straightaway, otherwise we could wait forever as the timeout thread has not started
|
||||
ackedListener.onAckTimeout();
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return new DelegetingAckListener(ackListeners);
|
||||
}
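createAckListener above only schedules an ack-timeout task when a non-zero timeout is configured, and if the scheduler rejects the task (typically because the node is shutting down) it reports the timeout immediately instead of leaving callers waiting forever. A self-contained sketch of that schedule-or-fail-fast pattern using a plain ScheduledExecutorService; the onAckTimeout callback is only a stand-in for the real listener method:

import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

final class AckTimeoutDemo {
    // Schedule the timeout callback; if the pool is already shutting down, invoke it right away
    // so nobody waits for a timeout task that will never run.
    static void scheduleAckTimeout(ScheduledExecutorService scheduler, Runnable onAckTimeout, long timeoutMillis) {
        if (timeoutMillis <= 0) {
            onAckTimeout.run(); // no timeout configured: count the ack as timed out straightaway
            return;
        }
        try {
            scheduler.schedule(onAckTimeout, timeoutMillis, TimeUnit.MILLISECONDS);
        } catch (RejectedExecutionException e) {
            onAckTimeout.run(); // fall back: report the timeout immediately
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        scheduleAckTimeout(scheduler, () -> System.out.println("ack timed out"), 100);
        Thread.sleep(200);
        scheduler.shutdown();
    }
}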
|
||||
|
||||
public boolean clusterStateUnchanged() {
|
||||
return previousClusterServiceState.getClusterState() == newClusterServiceState.getClusterState();
|
||||
}
|
||||
|
||||
public void notifyFailedTasks() {
|
||||
// fail all tasks that have failed
|
||||
for (UpdateTask updateTask : taskInputs.updateTasks) {
|
||||
assert executionResults.containsKey(updateTask.task) : "missing " + updateTask;
|
||||
final ClusterStateTaskExecutor.TaskResult taskResult = executionResults.get(updateTask.task);
|
||||
if (taskResult.isSuccess() == false) {
|
||||
updateTask.listener.onFailure(updateTask.source, taskResult.getFailure());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void notifySuccessfulTasksOnUnchangedClusterState() {
|
||||
ClusterState clusterState = newClusterServiceState.getClusterState();
|
||||
nonFailedTasks.forEach(task -> {
|
||||
if (task.listener instanceof AckedClusterStateTaskListener) {
|
||||
//no need to wait for ack if nothing changed, the update can be counted as acknowledged
|
||||
((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null);
|
||||
}
|
||||
task.listener.clusterStateProcessed(task.source, clusterState, clusterState);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// this one is overridden in tests so we can control time
|
||||
protected long currentTimeInNanos() {return System.nanoTime();}
|
||||
protected long currentTimeInNanos() {
|
||||
return System.nanoTime();
|
||||
}
|
||||
|
||||
private static SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, Logger logger) {
|
||||
if (listener instanceof AckedClusterStateTaskListener) {
|
||||
@ -943,14 +1033,15 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
}
|
||||
}
|
||||
|
||||
class UpdateTask<T> extends SourcePrioritizedRunnable {
|
||||
class UpdateTask extends SourcePrioritizedRunnable {
|
||||
|
||||
public final T task;
|
||||
public final Object task;
|
||||
public final ClusterStateTaskListener listener;
|
||||
private final ClusterStateTaskExecutor<T> executor;
|
||||
private final ClusterStateTaskExecutor<Object> executor;
|
||||
public final AtomicBoolean processed = new AtomicBoolean();
|
||||
|
||||
UpdateTask(String source, T task, Priority priority, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {
|
||||
UpdateTask(String source, Object task, Priority priority, ClusterStateTaskExecutor<Object> executor,
|
||||
ClusterStateTaskListener listener) {
|
||||
super(priority, source);
|
||||
this.task = task;
|
||||
this.executor = executor;
|
||||
@ -962,7 +1053,31 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
// if this task is already processed, the executor shouldn't execute other tasks (that arrived later),
|
||||
// to give other executors a chance to execute their tasks.
|
||||
if (processed.get() == false) {
|
||||
runTasksForExecutor(executor);
|
||||
final ArrayList<UpdateTask> toExecute = new ArrayList<>();
|
||||
final Map<String, ArrayList<Object>> processTasksBySource = new HashMap<>();
|
||||
synchronized (updateTasksPerExecutor) {
|
||||
LinkedHashSet<UpdateTask> pending = updateTasksPerExecutor.remove(executor);
|
||||
if (pending != null) {
|
||||
for (UpdateTask task : pending) {
|
||||
if (task.processed.getAndSet(true) == false) {
|
||||
logger.trace("will process {}", task);
|
||||
toExecute.add(task);
|
||||
processTasksBySource.computeIfAbsent(task.source, s -> new ArrayList<>()).add(task.task);
|
||||
} else {
|
||||
logger.trace("skipping {}, already processed", task);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (toExecute.isEmpty() == false) {
|
||||
final String tasksSummary = processTasksBySource.entrySet().stream().map(entry -> {
|
||||
String tasks = executor.describeTasks(entry.getValue());
|
||||
return tasks.isEmpty() ? entry.getKey() : entry.getKey() + "[" + tasks + "]";
|
||||
}).reduce((s1, s2) -> s1 + ", " + s2).orElse("");
|
||||
|
||||
runTasks(new TaskInputs(executor, toExecute, tasksSummary));
|
||||
}
|
||||
}
|
||||
}
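The block above drains all pending update tasks for one executor in a single pass, marking each as processed so later executors skip it, and then builds a human-readable summary by grouping task descriptions per source. The grouping-and-summary part is plain collections code; here is a self-contained sketch of it with a simplified Task type (the real UpdateTask also carries a listener, priority, and executor):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class TaskSummaryDemo {
    record Task(String source, String description) { }

    public static void main(String[] args) {
        List<Task> toExecute = List.of(
            new Task("create-index [logs]", "create index"),
            new Task("put-mapping [logs]", "type [doc]"),
            new Task("put-mapping [logs]", "type [event]"));

        // Group task descriptions by source, in the same way as processTasksBySource above.
        Map<String, List<String>> bySource = new HashMap<>();
        for (Task task : toExecute) {
            bySource.computeIfAbsent(task.source(), s -> new ArrayList<>()).add(task.description());
        }

        // Render "source[desc1, desc2]" entries and join them into one summary string.
        String summary = bySource.entrySet().stream()
            .map(entry -> entry.getValue().isEmpty()
                ? entry.getKey()
                : entry.getKey() + "[" + String.join(", ", entry.getValue()) + "]")
            .reduce((s1, s2) -> s1 + ", " + s2)
            .orElse("");
        System.out.println(summary);
    }
}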
|
||||
|
||||
@ -1130,12 +1245,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
||||
countDown = Math.max(1, countDown);
|
||||
logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
|
||||
this.countDown = new CountDown(countDown);
|
||||
this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
onTimeout();
|
||||
}
|
||||
});
|
||||
this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, () -> onTimeout());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -346,11 +346,14 @@ public class Cache<K, V> {
     * value using the given mapping function and enters it into this map unless null. The load method for a given key
     * will be invoked at most once.
     *
     * Use of different {@link CacheLoader} implementations on the same key concurrently may result in only the first
     * loader function being called; the second caller is returned the result provided by the first, including any exceptions
     * thrown during the execution of the first.
     *
     * @param key the key whose associated value is to be returned or computed for if non-existent
     * @param loader the function to compute a value given a key
     * @return the current (existing or computed) value associated with the specified key, or null if the computed
     * value is null
     * @throws ExecutionException thrown if loader throws an exception
     * @return the current (existing or computed) non-null value associated with the specified key
     * @throws ExecutionException thrown if loader throws an exception or returns a null value
     */
    public V computeIfAbsent(K key, CacheLoader<K, V> loader) throws ExecutionException {
        long now = now();
@ -410,6 +413,11 @@ public class Cache<K, V> {

            try {
                value = completableValue.get();
                // check to ensure the future hasn't been completed with an exception
                if (future.isCompletedExceptionally()) {
                    future.get(); // call get to force the exception to be thrown for other concurrent callers
                    throw new IllegalStateException("the future was completed exceptionally but no exception was thrown");
                }
            } catch (InterruptedException e) {
                throw new IllegalStateException(e);
            }

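The computeIfAbsent contract above (the loader runs at most once per key, and a concurrent caller observes whatever the first load produced, including its exception) can be illustrated with a small, self-contained JDK analogue. This LoadOnceCache is a hypothetical stand-in built on ConcurrentHashMap and CompletableFuture, not the Cache implementation itself:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;

final class LoadOnceCache<K, V> {
    private final ConcurrentHashMap<K, CompletableFuture<V>> map = new ConcurrentHashMap<>();

    // The loader is invoked at most once per key; every caller for that key shares the
    // same future, so a failure in the first load is re-thrown to later callers as well.
    V computeIfAbsent(K key, Function<K, V> loader) throws ExecutionException, InterruptedException {
        CompletableFuture<V> future = map.computeIfAbsent(key, k -> {
            CompletableFuture<V> f = new CompletableFuture<>();
            try {
                f.complete(loader.apply(k));
            } catch (Exception e) {
                f.completeExceptionally(e);
            }
            return f;
        });
        return future.get(); // throws ExecutionException if the single load failed
    }
}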
@ -25,7 +25,7 @@ import org.elasticsearch.action.support.DestructiveOperations;
|
||||
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
|
||||
import org.elasticsearch.bootstrap.BootstrapSettings;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.client.transport.TransportClientNodesService;
|
||||
import org.elasticsearch.client.transport.TransportClient;
|
||||
import org.elasticsearch.cluster.ClusterModule;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.InternalClusterInfoService;
|
||||
@ -162,11 +162,11 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
||||
|
||||
public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(
|
||||
Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
|
||||
TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind
|
||||
TransportClient.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind
|
||||
// of odd here and should only be valid if we are a transport client
|
||||
TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
|
||||
TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
|
||||
TransportClientNodesService.CLIENT_TRANSPORT_SNIFF,
|
||||
TransportClient.CLIENT_TRANSPORT_PING_TIMEOUT,
|
||||
TransportClient.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
|
||||
TransportClient.CLIENT_TRANSPORT_SNIFF,
|
||||
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
|
||||
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
|
||||
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
|
||||
@ -339,6 +339,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
||||
ZenDiscovery.MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING,
|
||||
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
|
||||
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
|
||||
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT,
|
||||
SearchService.DEFAULT_KEEPALIVE_SETTING,
|
||||
SearchService.KEEPALIVE_INTERVAL_SETTING,
|
||||
SearchService.LOW_LEVEL_CANCELLATION_SETTING,
|
||||
|
@ -69,7 +69,7 @@ public final class TransportAddress implements Writeable {
|
||||
* Read from a stream.
|
||||
*/
|
||||
public TransportAddress(StreamInput in) throws IOException {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // bwc layer for 5.x where we had more than one transport address
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // bwc layer for 5.x where we had more than one transport address
|
||||
final short i = in.readShort();
|
||||
if(i != 1) { // we fail hard to ensure nobody tries to use some custom transport address impl even if that is difficult to add
|
||||
throw new AssertionError("illegal transport ID from node of version: " + in.getVersion() + " got: " + i + " expected: 1");
|
||||
@ -85,7 +85,7 @@ public final class TransportAddress implements Writeable {
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
out.writeShort((short)1); // this maps to InetSocketTransportAddress in 5.x
|
||||
}
|
||||
byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
|
||||
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.discovery.zen;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
@ -61,12 +62,17 @@ import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Queue;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.RejectedExecutionException;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
@ -74,6 +80,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Collections.emptyMap;
|
||||
@ -89,6 +96,8 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
|
||||
Property.NodeScope);
|
||||
public static final Setting<Integer> DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING =
|
||||
Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, Property.NodeScope);
|
||||
public static final Setting<TimeValue> DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT =
|
||||
Setting.positiveTimeSetting("discovery.zen.ping.unicast.hosts.resolve_timeout", TimeValue.timeValueSeconds(5), Property.NodeScope);
|
||||
|
||||
// these limits are per-address
|
||||
public static final int LIMIT_FOREIGN_PORTS_COUNT = 1;
|
||||
@ -100,7 +109,9 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
|
||||
|
||||
private final int concurrentConnects;
|
||||
|
||||
private final DiscoveryNode[] configuredTargetNodes;
|
||||
private final List<String> configuredHosts;
|
||||
|
||||
private final int limitPortCounts;
|
||||
|
||||
private volatile PingContextProvider contextProvider;
|
||||
|
||||
@ -114,12 +125,14 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
|
||||
|
||||
private final Map<Integer, SendPingsHandler> receivedResponses = newConcurrentMap();
|
||||
|
||||
// a list of temporal responses a node will return for a request (holds requests from other configuredTargetNodes)
|
||||
// a list of temporal responses a node will return for a request (holds responses from other nodes)
|
||||
private final Queue<PingResponse> temporalResponses = ConcurrentCollections.newQueue();
|
||||
|
||||
private final UnicastHostsProvider hostsProvider;
|
||||
|
||||
private final ExecutorService unicastConnectExecutor;
|
||||
private final ExecutorService unicastZenPingExecutorService;
|
||||
|
||||
private final TimeValue resolveTimeout;
|
||||
|
||||
private volatile boolean closed = false;
|
||||
|
||||
@ -132,62 +145,110 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
|
||||
this.hostsProvider = unicastHostsProvider;
|
||||
|
||||
this.concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);
|
||||
List<String> hosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);
|
||||
final int limitPortCounts;
|
||||
final List<String> hosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);
|
||||
if (hosts.isEmpty()) {
|
||||
// if unicast hosts are not specified, fill with simple defaults on the local machine
|
||||
configuredHosts = transportService.getLocalAddresses();
|
||||
limitPortCounts = LIMIT_LOCAL_PORTS_COUNT;
|
||||
hosts.addAll(transportService.getLocalAddresses());
|
||||
} else {
|
||||
configuredHosts = hosts;
|
||||
// we only limit to 1 address, it makes no sense to ping 100 ports
|
||||
limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT;
|
||||
}
|
||||
|
||||
logger.debug("using initial hosts {}, with concurrent_connects [{}]", hosts, concurrentConnects);
|
||||
List<DiscoveryNode> configuredTargetNodes = new ArrayList<>();
|
||||
for (final String host : hosts) {
|
||||
configuredTargetNodes.addAll(resolveDiscoveryNodes(host, limitPortCounts, transportService,
|
||||
() -> UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#"));
|
||||
}
|
||||
this.configuredTargetNodes = configuredTargetNodes.toArray(new DiscoveryNode[configuredTargetNodes.size()]);
|
||||
resolveTimeout = DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT.get(settings);
|
||||
logger.debug(
|
||||
"using initial hosts {}, with concurrent_connects [{}], resolve_timeout [{}]",
|
||||
configuredHosts,
|
||||
concurrentConnects,
|
||||
resolveTimeout);
|
||||
|
||||
transportService.registerRequestHandler(ACTION_NAME, UnicastPingRequest::new, ThreadPool.Names.SAME,
|
||||
new UnicastPingRequestHandler());
|
||||
|
||||
ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]");
|
||||
unicastConnectExecutor = EsExecutors.newScaling("unicast_connect", 0, concurrentConnects, 60, TimeUnit.SECONDS,
|
||||
threadFactory, threadPool.getThreadContext());
|
||||
final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]");
|
||||
unicastZenPingExecutorService = EsExecutors.newScaling(
|
||||
"unicast_connect",
|
||||
0, concurrentConnects,
|
||||
60,
|
||||
TimeUnit.SECONDS,
|
||||
threadFactory,
|
||||
threadPool.getThreadContext());
|
||||
}
|
||||
|
||||
    /**
     * Resolves a host to a list of discovery nodes. The host is resolved into a transport
     * address (or a collection of addresses if the number of ports is greater than one) and
     * the transport addresses are used to create discovery nodes.
     * Resolves a list of hosts to a list of discovery nodes. Each host is resolved into a transport address (or a collection of addresses
     * if the number of ports is greater than one) and the transport addresses are used to create discovery nodes. Host lookups are done
     * in parallel using the specified executor service, up to the specified resolve timeout.
     *
     * @param host the host to resolve
     * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport)
     * @param executorService the executor service used to parallelize hostname lookups
     * @param logger logger used for logging messages regarding hostname lookups
     * @param hosts the hosts to resolve
     * @param limitPortCounts the number of ports to resolve (should be 1 for non-local transport)
     * @param transportService the transport service
     * @param idGenerator the generator to supply unique ids for each discovery node
     * @param idGenerator the generator to supply unique ids for each discovery node
     * @param resolveTimeout the timeout before returning from hostname lookups
     * @return a list of discovery nodes with resolved transport addresses
     */
public static List<DiscoveryNode> resolveDiscoveryNodes(final String host, final int limitPortCounts,
|
||||
final TransportService transportService, final Supplier<String> idGenerator) {
|
||||
List<DiscoveryNode> discoveryNodes = new ArrayList<>();
|
||||
try {
|
||||
TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
|
||||
for (TransportAddress address : addresses) {
|
||||
discoveryNodes.add(new DiscoveryNode(idGenerator.get(), address, emptyMap(), emptySet(),
|
||||
Version.CURRENT.minimumCompatibilityVersion()));
|
||||
public static List<DiscoveryNode> resolveDiscoveryNodes(
|
||||
final ExecutorService executorService,
|
||||
final Logger logger,
|
||||
final List<String> hosts,
|
||||
final int limitPortCounts,
|
||||
final TransportService transportService,
|
||||
final Supplier<String> idGenerator,
|
||||
final TimeValue resolveTimeout) throws InterruptedException {
|
||||
Objects.requireNonNull(executorService);
|
||||
Objects.requireNonNull(logger);
|
||||
Objects.requireNonNull(hosts);
|
||||
Objects.requireNonNull(transportService);
|
||||
Objects.requireNonNull(idGenerator);
|
||||
Objects.requireNonNull(resolveTimeout);
|
||||
if (resolveTimeout.nanos() < 0) {
|
||||
throw new IllegalArgumentException("resolve timeout must be non-negative but was [" + resolveTimeout + "]");
|
||||
}
|
||||
// create tasks to submit to the executor service; we will wait up to resolveTimeout for these tasks to complete
|
||||
final List<Callable<TransportAddress[]>> callables =
|
||||
hosts
|
||||
.stream()
|
||||
.map(hn -> (Callable<TransportAddress[]>)() -> transportService.addressesFromString(hn, limitPortCounts))
|
||||
.collect(Collectors.toList());
|
||||
final List<Future<TransportAddress[]>> futures =
|
||||
executorService.invokeAll(callables, resolveTimeout.nanos(), TimeUnit.NANOSECONDS);
|
||||
final List<DiscoveryNode> discoveryNodes = new ArrayList<>();
|
||||
// ExecutorService#invokeAll guarantees that the futures are returned in the iteration order of the tasks so we can associate the
|
||||
// hostname with the corresponding task by iterating together
|
||||
final Iterator<String> it = hosts.iterator();
|
||||
for (final Future<TransportAddress[]> future : futures) {
|
||||
final String hostname = it.next();
|
||||
if (!future.isCancelled()) {
|
||||
assert future.isDone();
|
||||
try {
|
||||
final TransportAddress[] addresses = future.get();
|
||||
logger.trace("resolved host [{}] to {}", hostname, addresses);
|
||||
for (final TransportAddress address : addresses) {
|
||||
discoveryNodes.add(
|
||||
new DiscoveryNode(
|
||||
idGenerator.get(),
|
||||
address,
|
||||
emptyMap(),
|
||||
emptySet(),
|
||||
Version.CURRENT.minimumCompatibilityVersion()));
|
||||
}
|
||||
} catch (final ExecutionException e) {
|
||||
assert e.getCause() != null;
|
||||
final String message = "failed to resolve host [" + hostname + "]";
|
||||
logger.warn(message, e.getCause());
|
||||
}
|
||||
} else {
|
||||
logger.warn("timed out after [{}] resolving host [{}]", resolveTimeout, hostname);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);
|
||||
}
|
||||
return discoveryNodes;
|
||||
}
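The parallel-resolution approach documented above (submit one lookup per host via invokeAll, wait up to the resolve timeout, then walk the returned futures in input order so each result can be matched back to its hostname) can be sketched with plain JDK types. HostResolver and the InetAddress lookup below are stand-ins for the transport-service call and are not part of the actual class:

import java.net.InetAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

final class HostResolver {
    // Resolve each host on the executor, waiting at most timeoutMillis overall. invokeAll
    // returns futures in the same order as the submitted tasks, so iterating the input
    // hosts alongside the futures keeps hostname and result aligned.
    static List<InetAddress> resolveAll(ExecutorService executor, List<String> hosts, long timeoutMillis)
            throws InterruptedException {
        List<Callable<InetAddress>> tasks = hosts.stream()
            .map(host -> (Callable<InetAddress>) () -> InetAddress.getByName(host))
            .collect(Collectors.toList());
        List<Future<InetAddress>> futures = executor.invokeAll(tasks, timeoutMillis, TimeUnit.MILLISECONDS);
        List<InetAddress> resolved = new ArrayList<>();
        Iterator<String> it = hosts.iterator();
        for (Future<InetAddress> future : futures) {
            String host = it.next();
            if (future.isCancelled()) {
                System.err.println("timed out resolving [" + host + "]");
            } else {
                try {
                    resolved.add(future.get());
                } catch (ExecutionException e) {
                    System.err.println("failed to resolve [" + host + "]: " + e.getCause());
                }
            }
        }
        return resolved;
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        try {
            System.out.println(resolveAll(executor, Arrays.asList("localhost", "example.invalid"), 5000));
        } finally {
            executor.shutdown();
        }
    }
}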
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
ThreadPool.terminate(unicastConnectExecutor, 0, TimeUnit.SECONDS);
|
||||
ThreadPool.terminate(unicastZenPingExecutorService, 0, TimeUnit.SECONDS);
|
||||
Releasables.close(receivedResponses.values());
|
||||
closed = true;
|
||||
}
|
||||
@ -220,27 +281,49 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
|
||||
}
|
||||
}
|
||||
|
||||
    /**
     * Sends three rounds of pings notifying the specified {@link PingListener} when pinging is complete. Pings are sent after resolving
     * configured unicast hosts to their IP address (subject to DNS caching within the JVM). A batch of pings is sent, then another batch
     * of pings is sent at half the specified {@link TimeValue}, and then another batch of pings is sent at the specified {@link TimeValue}.
     * The pings that are sent carry a timeout of 1.25 times the {@link TimeValue}.
     *
     * @param listener the callback when pinging is complete
     * @param duration the timeout for various components of the pings
     */
    @Override
    public void ping(final PingListener listener, final TimeValue duration) {
final List<DiscoveryNode> resolvedDiscoveryNodes;
|
||||
try {
|
||||
resolvedDiscoveryNodes = resolveDiscoveryNodes(
|
||||
unicastZenPingExecutorService,
|
||||
logger,
|
||||
configuredHosts,
|
||||
limitPortCounts,
|
||||
transportService,
|
||||
() -> UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#",
|
||||
resolveTimeout);
|
||||
} catch (InterruptedException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
final SendPingsHandler sendPingsHandler = new SendPingsHandler(pingHandlerIdGenerator.incrementAndGet());
|
||||
try {
|
||||
receivedResponses.put(sendPingsHandler.id(), sendPingsHandler);
|
||||
try {
|
||||
sendPings(duration, null, sendPingsHandler);
|
||||
sendPings(duration, null, sendPingsHandler, resolvedDiscoveryNodes);
|
||||
} catch (RejectedExecutionException e) {
|
||||
logger.debug("Ping execution rejected", e);
|
||||
// The RejectedExecutionException can come from the fact unicastConnectExecutor is at its max down in sendPings
|
||||
// The RejectedExecutionException can come from the fact unicastZenPingExecutorService is at its max down in sendPings
|
||||
// But don't bail here, we can retry later on after the send ping has been scheduled.
|
||||
}
|
||||
|
||||
threadPool.schedule(TimeValue.timeValueMillis(duration.millis() / 2), ThreadPool.Names.GENERIC, new AbstractRunnable() {
|
||||
@Override
|
||||
protected void doRun() {
|
||||
sendPings(duration, null, sendPingsHandler);
|
||||
sendPings(duration, null, sendPingsHandler, resolvedDiscoveryNodes);
|
||||
threadPool.schedule(TimeValue.timeValueMillis(duration.millis() / 2), ThreadPool.Names.GENERIC, new AbstractRunnable() {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
sendPings(duration, TimeValue.timeValueMillis(duration.millis() / 2), sendPingsHandler);
|
||||
sendPings(duration, TimeValue.timeValueMillis(duration.millis() / 2), sendPingsHandler, resolvedDiscoveryNodes);
|
||||
sendPingsHandler.close();
|
||||
listener.onPing(sendPingsHandler.pingCollection().toList());
|
||||
for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) {
|
||||
@ -305,7 +388,11 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
|
||||
}
|
||||
|
||||
|
||||
void sendPings(final TimeValue timeout, @Nullable TimeValue waitTime, final SendPingsHandler sendPingsHandler) {
|
||||
void sendPings(
|
||||
final TimeValue timeout,
|
||||
@Nullable TimeValue waitTime,
|
||||
final SendPingsHandler sendPingsHandler,
|
||||
final List<DiscoveryNode> resolvedDiscoveryNodes) {
|
||||
final UnicastPingRequest pingRequest = new UnicastPingRequest();
|
||||
pingRequest.id = sendPingsHandler.id();
|
||||
pingRequest.timeout = timeout;
|
||||
@ -330,8 +417,9 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
|
||||
// sort the nodes by likelihood of being an active master
|
||||
List<DiscoveryNode> sortedNodesToPing = ElectMasterService.sortByMasterLikelihood(nodesToPingSet);
|
||||
|
||||
// new add the unicast targets first
|
||||
List<DiscoveryNode> nodesToPing = CollectionUtils.arrayAsArrayList(configuredTargetNodes);
|
||||
// add the configured hosts first
|
||||
final List<DiscoveryNode> nodesToPing = new ArrayList<>(resolvedDiscoveryNodes.size() + sortedNodesToPing.size());
|
||||
nodesToPing.addAll(resolvedDiscoveryNodes);
|
||||
nodesToPing.addAll(sortedNodesToPing);
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(nodesToPing.size());
|
||||
@ -369,7 +457,7 @@ public class UnicastZenPing extends AbstractComponent implements ZenPing {
|
||||
}
|
||||
// fork the connection to another thread
|
||||
final DiscoveryNode finalNodeToSend = nodeToSend;
|
||||
unicastConnectExecutor.execute(new Runnable() {
|
||||
unicastZenPingExecutorService.execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
if (sendPingsHandler.isClosed()) {
|
||||
|
@ -38,6 +38,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.ShardLockObtainFailedException;
|
||||
import org.elasticsearch.gateway.AsyncShardFetch.FetchResult;
|
||||
import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards;
|
||||
import org.elasticsearch.index.shard.ShardStateMetaData;
|
||||
@ -256,6 +257,11 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
|
||||
return nodeDecisions;
|
||||
}
|
||||
|
||||
    private static final Comparator<NodeGatewayStartedShards> NO_STORE_EXCEPTION_FIRST_COMPARATOR =
        Comparator.comparing((NodeGatewayStartedShards state) -> state.storeException() == null).reversed();
    private static final Comparator<NodeGatewayStartedShards> PRIMARY_FIRST_COMPARATOR =
        Comparator.comparing(NodeGatewayStartedShards::primary).reversed();

    /**
     * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching
     * inSyncAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
@ -265,8 +271,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
|
||||
Set<String> ignoreNodes, Set<String> inSyncAllocationIds,
|
||||
FetchResult<NodeGatewayStartedShards> shardState,
|
||||
Logger logger) {
|
||||
LinkedList<NodeGatewayStartedShards> matchingNodeShardStates = new LinkedList<>();
|
||||
LinkedList<NodeGatewayStartedShards> nonMatchingNodeShardStates = new LinkedList<>();
|
||||
List<NodeGatewayStartedShards> nodeShardStates = new ArrayList<>();
|
||||
int numberOfAllocationsFound = 0;
|
||||
for (NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {
|
||||
DiscoveryNode node = nodeShardState.getNode();
|
||||
@ -287,31 +292,36 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
|
||||
}
|
||||
} else {
|
||||
final String finalAllocationId = allocationId;
|
||||
logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
|
||||
allocationId = null;
|
||||
if (nodeShardState.storeException() instanceof ShardLockObtainFailedException) {
|
||||
logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
|
||||
} else {
|
||||
logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException());
|
||||
allocationId = null;
|
||||
}
|
||||
}
|
||||
|
||||
if (allocationId != null) {
|
||||
assert nodeShardState.storeException() == null ||
|
||||
nodeShardState.storeException() instanceof ShardLockObtainFailedException :
|
||||
"only allow store that can be opened or that throws a ShardLockObtainFailedException while being opened but got a store throwing " + nodeShardState.storeException();
|
||||
numberOfAllocationsFound++;
|
||||
if (inSyncAllocationIds.contains(allocationId)) {
|
||||
if (nodeShardState.primary()) {
|
||||
matchingNodeShardStates.addFirst(nodeShardState);
|
||||
} else {
|
||||
matchingNodeShardStates.addLast(nodeShardState);
|
||||
}
|
||||
} else if (matchAnyShard) {
|
||||
if (nodeShardState.primary()) {
|
||||
nonMatchingNodeShardStates.addFirst(nodeShardState);
|
||||
} else {
|
||||
nonMatchingNodeShardStates.addLast(nodeShardState);
|
||||
}
|
||||
if (matchAnyShard || inSyncAllocationIds.contains(nodeShardState.allocationId())) {
|
||||
nodeShardStates.add(nodeShardState);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
List<NodeGatewayStartedShards> nodeShardStates = new ArrayList<>();
|
||||
nodeShardStates.addAll(matchingNodeShardStates);
|
||||
nodeShardStates.addAll(nonMatchingNodeShardStates);
|
||||
        final Comparator<NodeGatewayStartedShards> comparator; // allocation preference
        if (matchAnyShard) {
            // prefer shards with matching allocation ids
            Comparator<NodeGatewayStartedShards> matchingAllocationsFirst = Comparator.comparing(
                (NodeGatewayStartedShards state) -> inSyncAllocationIds.contains(state.allocationId())).reversed();
            comparator = matchingAllocationsFirst.thenComparing(NO_STORE_EXCEPTION_FIRST_COMPARATOR).thenComparing(PRIMARY_FIRST_COMPARATOR);
        } else {
            comparator = NO_STORE_EXCEPTION_FIRST_COMPARATOR.thenComparing(PRIMARY_FIRST_COMPARATOR);
        }

        nodeShardStates.sort(comparator);

if (logger.isTraceEnabled()) {
|
||||
logger.trace("{} candidates for allocation: {}", shard, nodeShardStates.stream().map(s -> s.getNode().getName()).collect(Collectors.joining(", ")));
|
||||
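The allocation preference above is built by chaining comparators: boolean keys sort with false before true, so each comparing(...).reversed() moves the "true" group (matching allocation id, no store exception, primary) to the front, and thenComparing only breaks ties left by the previous criterion. A small self-contained illustration of the same chaining, with a made-up ShardCopy record standing in for NodeGatewayStartedShards:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

final class AllocationPreferenceDemo {
    // Stand-in for NodeGatewayStartedShards: just the three boolean criteria we sort on.
    record ShardCopy(String node, boolean matchingAllocationId, boolean noStoreException, boolean primary) { }

    public static void main(String[] args) {
        Comparator<ShardCopy> matchingFirst = Comparator.comparing(ShardCopy::matchingAllocationId).reversed();
        Comparator<ShardCopy> noExceptionFirst = Comparator.comparing(ShardCopy::noStoreException).reversed();
        Comparator<ShardCopy> primaryFirst = Comparator.comparing(ShardCopy::primary).reversed();

        List<ShardCopy> copies = new ArrayList<>(List.of(
            new ShardCopy("node-a", false, true, true),
            new ShardCopy("node-b", true, false, false),
            new ShardCopy("node-c", true, true, false),
            new ShardCopy("node-d", true, true, true)));

        // Same chaining as above: matching ids first, then copies whose store opened cleanly, then primaries.
        copies.sort(matchingFirst.thenComparing(noExceptionFirst).thenComparing(primaryFirst));
        copies.forEach(c -> System.out.println(c.node())); // node-d, node-c, node-b, node-a
    }
}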
@ -412,10 +422,19 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
|
||||
logger.trace("[{}] on node [{}] has allocation id [{}]", shard, nodeShardState.getNode(), nodeShardState.allocationId());
|
||||
}
|
||||
} else {
|
||||
final long finalVerison = version;
|
||||
// when there is an store exception, we disregard the reported version and assign it as no version (same as shard does not exist)
|
||||
logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has version [{}] but the store can not be opened, treating no version", shard, nodeShardState.getNode(), finalVerison), nodeShardState.storeException());
|
||||
version = ShardStateMetaData.NO_VERSION;
|
||||
final long finalVersion = version;
|
||||
if (nodeShardState.storeException() instanceof ShardLockObtainFailedException) {
|
||||
logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has version [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalVersion), nodeShardState.storeException());
|
||||
if (nodeShardState.allocationId() != null) {
|
||||
version = Long.MAX_VALUE; // shard was already selected in a 5.x cluster as primary, prefer this shard copy again.
|
||||
} else {
|
||||
version = 0L; // treat as lowest version so that this shard is the least likely to be selected as primary
|
||||
}
|
||||
} else {
|
||||
// disregard the reported version and assign it as no version (same as shard does not exist)
|
||||
logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] on node [{}] has version [{}] but the store can not be opened, treating no version", shard, nodeShardState.getNode(), finalVersion), nodeShardState.storeException());
|
||||
version = ShardStateMetaData.NO_VERSION;
|
||||
}
|
||||
}
|
||||
|
||||
if (version != ShardStateMetaData.NO_VERSION) {
|
||||
|
@ -414,7 +414,7 @@ public class InternalEngine extends Engine {
|
||||
final long expectedVersion,
|
||||
final boolean deleted) {
|
||||
if (op.versionType() == VersionType.FORCE) {
|
||||
if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
// If index was created in 5.0 or later, 'force' is not allowed at all
|
||||
throw new IllegalArgumentException("version type [FORCE] may not be used for indices created after 6.0");
|
||||
} else if (op.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
|
||||
@ -528,7 +528,7 @@ public class InternalEngine extends Engine {
|
||||
}
|
||||
|
||||
private boolean assertSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) {
|
||||
if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) && origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
|
||||
if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) && origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
|
||||
// legacy support
|
||||
assert seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO : "old op recovering but it already has a seq no." +
|
||||
" index version: " + engineConfig.getIndexSettings().getIndexVersionCreated() + ". seq no: " + seqNo;
|
||||
@ -1097,15 +1097,21 @@ public class InternalEngine extends Engine {
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("finally")
|
||||
private void failOnTragicEvent(AlreadyClosedException ex) {
|
||||
// if we are already closed due to some tragic exception
|
||||
// we need to fail the engine. it might have already been failed before
|
||||
// but we are double-checking it's failed and closed
|
||||
if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) {
|
||||
final Exception tragedy = indexWriter.getTragicException() instanceof Exception ?
|
||||
(Exception) indexWriter.getTragicException() :
|
||||
new Exception(indexWriter.getTragicException());
|
||||
failEngine("already closed by tragic event on the index writer", tragedy);
|
||||
if (indexWriter.getTragicException() instanceof Error) {
|
||||
try {
|
||||
logger.error("tragic event in index writer", ex);
|
||||
} finally {
|
||||
throw (Error) indexWriter.getTragicException();
|
||||
}
|
||||
} else {
|
||||
failEngine("already closed by tragic event on the index writer", (Exception) indexWriter.getTragicException());
|
||||
}
|
||||
} else if (translog.isOpen() == false && translog.getTragicException() != null) {
|
||||
failEngine("already closed by tragic event on the translog", translog.getTragicException());
|
||||
} else if (failedEngine.get() == null) { // we are closed but the engine is not failed yet?
|
||||
|
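The reworked failOnTragicEvent above logs and then rethrows when the tragic exception is an Error (for example an OutOfMemoryError) instead of wrapping it; rethrowing from a finally block is what the @SuppressWarnings("finally") annotation is for. A self-contained sketch of that log-then-rethrow-Error pattern, independent of the engine classes and using hypothetical helper names:

final class TragicEventDemo {
    // Errors must not be swallowed or wrapped: log them for the post-mortem, then rethrow
    // so the JVM-level failure still propagates. Ordinary exceptions go to a failure hook.
    @SuppressWarnings("finally")
    static void failOnTragicEvent(Throwable tragedy) {
        if (tragedy instanceof Error) {
            try {
                System.err.println("tragic event: " + tragedy);
            } finally {
                throw (Error) tragedy; // deliberate throw from finally, hence @SuppressWarnings("finally")
            }
        } else {
            failEngine("already closed by tragic event", (Exception) tragedy);
        }
    }

    static void failEngine(String reason, Exception cause) {
        System.err.println("failing engine: " + reason + " due to " + cause);
    }
}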
@ -25,7 +25,6 @@ import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.geo.GeoHashUtils;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
import org.elasticsearch.common.geo.GeoUtils;
|
||||
import org.elasticsearch.common.unit.DistanceUnit;
|
||||
import org.joda.time.DateTimeZone;
|
||||
import org.joda.time.MutableDateTime;
|
||||
import org.joda.time.ReadableDateTime;
|
||||
@ -325,4 +324,46 @@ public interface ScriptDocValues<T> extends List<T> {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public static class BytesRefs extends AbstractList<BytesRef> implements ScriptDocValues<BytesRef> {
|
||||
|
||||
private final SortedBinaryDocValues values;
|
||||
|
||||
public BytesRefs(SortedBinaryDocValues values) {
|
||||
this.values = values;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNextDocId(int docId) {
|
||||
values.setDocument(docId);
|
||||
}
|
||||
|
||||
public SortedBinaryDocValues getInternalValues() {
|
||||
return this.values;
|
||||
}
|
||||
|
||||
public BytesRef getValue() {
|
||||
int numValues = values.count();
|
||||
if (numValues == 0) {
|
||||
return new BytesRef();
|
||||
}
|
||||
return values.valueAt(0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<BytesRef> getValues() {
|
||||
return Collections.unmodifiableList(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef get(int index) {
|
||||
return values.valueAt(index);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int size() {
|
||||
return values.count();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -101,7 +101,7 @@ final class BytesBinaryDVAtomicFieldData implements AtomicFieldData {
|
||||
|
||||
@Override
|
||||
public ScriptDocValues getScriptValues() {
|
||||
throw new UnsupportedOperationException();
|
||||
return new ScriptDocValues.BytesRefs(getBytesValues());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -113,15 +113,6 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier {
|
||||
return new QueryParseContext(indicesQueriesRegistry, parser, indexSettings.getParseFieldMatcher());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new {@link QueryParseContext} like {@link #newParseContext(XContentParser)} with the only diffence, that
|
||||
* the default script language will default to what has been set in the 'script.legacy.default_lang' setting.
|
||||
*/
|
||||
public QueryParseContext newParseContextWithLegacyScriptLanguage(XContentParser parser) {
|
||||
String defaultScriptLanguage = ScriptSettings.getLegacyDefaultLang(indexSettings.getNodeSettings());
|
||||
return new QueryParseContext(defaultScriptLanguage, indicesQueriesRegistry, parser, indexSettings.getParseFieldMatcher());
|
||||
}
|
||||
|
||||
public long nowInMillis() {
|
||||
return nowInMillis.getAsLong();
|
||||
}
|
||||
|
@ -71,9 +71,8 @@ import java.util.TreeMap;
|
||||
* them either using DisMax or a plain boolean query (see {@link #useDisMax(boolean)}).
|
||||
*/
|
||||
public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQueryBuilder> {
|
||||
public static final String NAME = "query_string";
|
||||
|
||||
public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);
|
||||
public static final String NAME = "query_string";
|
||||
|
||||
public static final boolean DEFAULT_AUTO_GENERATE_PHRASE_QUERIES = false;
|
||||
public static final int DEFAULT_MAX_DETERMINED_STATES = Operations.DEFAULT_MAX_DETERMINIZED_STATES;
|
||||
@ -219,11 +218,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
||||
autoGeneratePhraseQueries = in.readBoolean();
|
||||
allowLeadingWildcard = in.readOptionalBoolean();
|
||||
analyzeWildcard = in.readOptionalBoolean();
|
||||
if (in.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_5_1_0_UNRELEASED)) {
|
||||
in.readBoolean(); // lowercase_expanded_terms
|
||||
}
|
||||
enablePositionIncrements = in.readBoolean();
|
||||
if (in.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_5_1_0_UNRELEASED)) {
|
||||
in.readString(); // locale
|
||||
}
|
||||
fuzziness = new Fuzziness(in);
|
||||
@ -239,7 +238,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
||||
timeZone = in.readOptionalTimeZone();
|
||||
escape = in.readBoolean();
|
||||
maxDeterminizedStates = in.readVInt();
|
||||
if (in.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
|
||||
splitOnWhitespace = in.readBoolean();
|
||||
useAllFields = in.readOptionalBoolean();
|
||||
} else {
|
||||
@ -263,11 +262,11 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
||||
out.writeBoolean(this.autoGeneratePhraseQueries);
|
||||
out.writeOptionalBoolean(this.allowLeadingWildcard);
|
||||
out.writeOptionalBoolean(this.analyzeWildcard);
|
||||
if (out.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_5_1_0_UNRELEASED)) {
|
||||
out.writeBoolean(true); // lowercase_expanded_terms
|
||||
}
|
||||
out.writeBoolean(this.enablePositionIncrements);
|
||||
if (out.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_5_1_0_UNRELEASED)) {
|
||||
out.writeString(Locale.ROOT.toLanguageTag()); // locale
|
||||
}
|
||||
this.fuzziness.writeTo(out);
|
||||
@ -283,7 +282,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
||||
out.writeOptionalTimeZone(timeZone);
|
||||
out.writeBoolean(this.escape);
|
||||
out.writeVInt(this.maxDeterminizedStates);
|
||||
if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
|
||||
out.writeBoolean(this.splitOnWhitespace);
|
||||
out.writeOptionalBoolean(this.useAllFields);
|
||||
}
|
||||
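These readFrom/writeTo hunks all follow the same backwards-compatibility idiom: fields that older nodes expect are still written (with dummy values) when talking to a pre-5.1.0 peer, and newer fields are only exchanged when both sides are on or after the version that introduced them. A self-contained JDK analogue of the idiom, using DataOutputStream and plain integer wire versions (the "old peer" id below is an arbitrary value smaller than 5010099) instead of StreamOutput/Version:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class VersionGatedWriteDemo {
    static final int V_5_1_0 = 5010099; // version that dropped lowercase_expanded_terms and added split_on_whitespace

    // Write the query options for a peer speaking peerVersion.
    static byte[] write(int peerVersion, boolean splitOnWhitespace) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            if (peerVersion < V_5_1_0) {
                out.writeBoolean(true); // legacy lowercase_expanded_terms: old peers still expect the flag
            }
            if (peerVersion >= V_5_1_0) {
                out.writeBoolean(splitOnWhitespace); // new field, only understood by 5.1.0+ peers
            }
        }
        return bytes.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        System.out.println("old peer payload: " + write(5000099, true).length + " byte(s)");
        System.out.println("new peer payload: " + write(V_5_1_0, true).length + " byte(s)");
    }
}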
@ -1058,4 +1057,5 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
|
||||
|
||||
return query;
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -79,6 +79,7 @@ import java.util.TreeMap;
|
||||
* > online documentation</a>.
|
||||
*/
|
||||
public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQueryStringBuilder> {
|
||||
|
||||
/** Default for using lenient query parsing.*/
|
||||
public static final boolean DEFAULT_LENIENT = false;
|
||||
/** Default for wildcard analysis.*/
|
||||
@ -91,8 +92,6 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
||||
/** Name for (de-)serialization. */
|
||||
public static final String NAME = "simple_query_string";
|
||||
|
||||
public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);
|
||||
|
||||
private static final ParseField MINIMUM_SHOULD_MATCH_FIELD = new ParseField("minimum_should_match");
|
||||
private static final ParseField ANALYZE_WILDCARD_FIELD = new ParseField("analyze_wildcard");
|
||||
private static final ParseField LENIENT_FIELD = new ParseField("lenient");
|
||||
@ -160,19 +159,19 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
||||
flags = in.readInt();
|
||||
analyzer = in.readOptionalString();
|
||||
defaultOperator = Operator.readFromStream(in);
|
||||
if (in.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_5_1_0_UNRELEASED)) {
|
||||
in.readBoolean(); // lowercase_expanded_terms
|
||||
}
|
||||
settings.lenient(in.readBoolean());
|
||||
if (in.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
|
||||
this.lenientSet = in.readBoolean();
|
||||
}
|
||||
settings.analyzeWildcard(in.readBoolean());
|
||||
if (in.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_5_1_0_UNRELEASED)) {
|
||||
in.readString(); // locale
|
||||
}
|
||||
minimumShouldMatch = in.readOptionalString();
|
||||
if (in.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
|
||||
settings.quoteFieldSuffix(in.readOptionalString());
|
||||
useAllFields = in.readOptionalBoolean();
|
||||
}
|
||||
@ -189,19 +188,19 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
||||
out.writeInt(flags);
|
||||
out.writeOptionalString(analyzer);
|
||||
defaultOperator.writeTo(out);
|
||||
if (out.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_5_1_0_UNRELEASED)) {
|
||||
out.writeBoolean(true); // lowercase_expanded_terms
|
||||
}
|
||||
out.writeBoolean(settings.lenient());
|
||||
if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
|
||||
out.writeBoolean(lenientSet);
|
||||
}
|
||||
out.writeBoolean(settings.analyzeWildcard());
|
||||
if (out.getVersion().before(V_5_1_0_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_5_1_0_UNRELEASED)) {
|
||||
out.writeString(Locale.ROOT.toLanguageTag()); // locale
|
||||
}
|
||||
out.writeOptionalString(minimumShouldMatch);
|
||||
if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
|
||||
out.writeOptionalString(settings.quoteFieldSuffix());
|
||||
out.writeOptionalBoolean(useAllFields);
|
||||
}
|
||||
@ -603,4 +602,5 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
|
||||
&& (flags == other.flags)
|
||||
&& (useAllFields == other.useAllFields);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -414,15 +414,12 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
     * segment infos and possible corruption markers. If the index can not
     * be opened, an exception is thrown
     */
    public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException {
    public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException, ShardLockObtainFailedException {
        try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5));
             Directory dir = new SimpleFSDirectory(indexLocation)) {
            failIfCorrupted(dir, shardId);
            SegmentInfos segInfo = Lucene.readSegmentInfos(dir);
            logger.trace("{} loaded segment info [{}]", shardId, segInfo);
        } catch (ShardLockObtainFailedException ex) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unable to acquire shard lock", shardId), ex);
            throw new IOException(ex);
        }
    }
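The tryOpenIndex change above stops wrapping ShardLockObtainFailedException in an IOException and instead declares it, so callers (such as the allocator changes earlier in this commit) can tell "store is locked" apart from "store is corrupted". A generic, self-contained illustration of that difference, with made-up exception and method names:

final class PropagateSpecificExceptionDemo {
    static final class LockObtainFailedException extends Exception {
        LockObtainFailedException(String message) { super(message); }
    }

    // Before: the specific failure is wrapped, so callers only ever see IOException.
    static void openWrapped(boolean locked) throws java.io.IOException {
        try {
            open(locked);
        } catch (LockObtainFailedException e) {
            throw new java.io.IOException(e);
        }
    }

    // After: the specific failure is declared and propagated, so callers can branch on it.
    static void openDeclared(boolean locked) throws java.io.IOException, LockObtainFailedException {
        open(locked);
    }

    private static void open(boolean locked) throws LockObtainFailedException {
        if (locked) {
            throw new LockObtainFailedException("shard is locked by another process");
        }
        // ... otherwise open the directory and read segment infos ...
    }

    public static void main(String[] args) {
        try {
            openDeclared(true);
        } catch (LockObtainFailedException e) {
            System.out.println("locked, treat the copy as potentially valid: " + e.getMessage());
        } catch (java.io.IOException e) {
            System.out.println("store is broken: " + e.getMessage());
        }
    }
}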
|
||||
|
||||
|
@ -74,7 +74,7 @@ public class StoreStats implements Streamable, ToXContent {
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
sizeInBytes = in.readVLong();
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
in.readVLong(); // throttleTimeInNanos
|
||||
}
|
||||
}
|
||||
@ -82,7 +82,7 @@ public class StoreStats implements Streamable, ToXContent {
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVLong(sizeInBytes);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
out.writeVLong(0L); // throttleTimeInNanos
|
||||
}
|
||||
}
|
||||
|
@ -32,7 +32,7 @@ import java.util.Arrays;
|
||||
import java.util.Objects;
|
||||
|
||||
public class OsStats implements Writeable, ToXContent {
|
||||
public static final Version V_5_1_0 = Version.fromId(5010099);
|
||||
|
||||
private final long timestamp;
|
||||
private final Cpu cpu;
|
||||
private final Mem mem;
|
||||
@ -52,7 +52,7 @@ public class OsStats implements Writeable, ToXContent {
|
||||
this.cpu = new Cpu(in);
|
||||
this.mem = new Mem(in);
|
||||
this.swap = new Swap(in);
|
||||
if (in.getVersion().onOrAfter(V_5_1_0)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
|
||||
this.cgroup = in.readOptionalWriteable(Cgroup::new);
|
||||
} else {
|
||||
this.cgroup = null;
|
||||
@ -65,7 +65,7 @@ public class OsStats implements Writeable, ToXContent {
|
||||
cpu.writeTo(out);
|
||||
mem.writeTo(out);
|
||||
swap.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(V_5_1_0)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
|
||||
out.writeOptionalWriteable(cgroup);
|
||||
}
|
||||
}
|
||||
|
@ -28,6 +28,8 @@ import org.elasticsearch.ElasticsearchTimeoutException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionModule;
|
||||
import org.elasticsearch.action.GenericAction;
|
||||
import org.elasticsearch.action.search.SearchPhaseController;
|
||||
import org.elasticsearch.action.search.SearchTransportService;
|
||||
import org.elasticsearch.action.support.TransportAction;
|
||||
import org.elasticsearch.action.update.UpdateHelper;
|
||||
import org.elasticsearch.client.Client;
|
||||
@ -401,7 +403,6 @@ public class Node implements Closeable {
|
||||
|
||||
final DiscoveryModule discoveryModule = new DiscoveryModule(this.settings, threadPool, transportService,
|
||||
networkService, clusterService, pluginsService.filterPlugins(DiscoveryPlugin.class));
|
||||
pluginsService.processModules(modules);
|
||||
modules.add(b -> {
|
||||
b.bind(IndicesQueriesRegistry.class).toInstance(searchModule.getQueryParserRegistry());
|
||||
b.bind(SearchRequestParsers.class).toInstance(searchModule.getSearchRequestParsers());
|
||||
@ -425,6 +426,9 @@ public class Node implements Closeable {
|
||||
b.bind(IndicesService.class).toInstance(indicesService);
|
||||
b.bind(SearchService.class).toInstance(newSearchService(clusterService, indicesService,
|
||||
threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase()));
|
||||
b.bind(SearchTransportService.class).toInstance(new SearchTransportService(settings, transportService));
|
||||
b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings, bigArrays,
|
||||
scriptModule.getScriptService()));
|
||||
b.bind(Transport.class).toInstance(transport);
|
||||
b.bind(TransportService.class).toInstance(transportService);
|
||||
b.bind(NetworkService.class).toInstance(networkService);
|
||||
|
@ -76,8 +76,6 @@ public class PluginsService extends AbstractComponent {
|
||||
public static final Setting<List<String>> MANDATORY_SETTING =
|
||||
Setting.listSetting("plugin.mandatory", Collections.emptyList(), Function.identity(), Property.NodeScope);
|
||||
|
||||
private final Map<Plugin, List<OnModuleReference>> onModuleReferences;
|
||||
|
||||
public List<Setting<?>> getPluginSettings() {
|
||||
return plugins.stream().flatMap(p -> p.v2().getSettings().stream()).collect(Collectors.toList());
|
||||
}
|
||||
@ -86,16 +84,6 @@ public class PluginsService extends AbstractComponent {
|
||||
return plugins.stream().flatMap(p -> p.v2().getSettingsFilter().stream()).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
static class OnModuleReference {
|
||||
public final Class<? extends Module> moduleClass;
|
||||
public final Method onModuleMethod;
|
||||
|
||||
OnModuleReference(Class<? extends Module> moduleClass, Method onModuleMethod) {
|
||||
this.moduleClass = moduleClass;
|
||||
this.onModuleMethod = onModuleMethod;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a new PluginService
|
||||
* @param settings The settings of the system
|
||||
@ -175,40 +163,6 @@ public class PluginsService extends AbstractComponent {
|
||||
// but for now: just be transparent so we can debug any potential issues
|
||||
logPluginInfo(info.getModuleInfos(), "module", logger);
|
||||
logPluginInfo(info.getPluginInfos(), "plugin", logger);
|
||||
|
||||
Map<Plugin, List<OnModuleReference>> onModuleReferences = new HashMap<>();
|
||||
for (Tuple<PluginInfo, Plugin> pluginEntry : this.plugins) {
|
||||
Plugin plugin = pluginEntry.v2();
|
||||
List<OnModuleReference> list = new ArrayList<>();
|
||||
for (Method method : plugin.getClass().getMethods()) {
|
||||
if (!method.getName().equals("onModule")) {
|
||||
continue;
|
||||
}
|
||||
// this is a deprecated final method, so all Plugin subclasses have it
|
||||
if (method.getParameterTypes().length == 1 && method.getParameterTypes()[0].equals(IndexModule.class)) {
|
||||
continue;
|
||||
}
|
||||
if (method.getParameterTypes().length == 0 || method.getParameterTypes().length > 1) {
|
||||
logger.warn("Plugin: {} implementing onModule with no parameters or more than one parameter", pluginEntry.v1().getName());
|
||||
continue;
|
||||
}
|
||||
Class moduleClass = method.getParameterTypes()[0];
|
||||
if (!Module.class.isAssignableFrom(moduleClass)) {
|
||||
if (method.getDeclaringClass() == Plugin.class) {
|
||||
// These are still part of the Plugin class to point the user to the new implementations
|
||||
continue;
|
||||
}
|
||||
throw new RuntimeException(
|
||||
"Plugin: [" + pluginEntry.v1().getName() + "] implements onModule taking a parameter that isn't a Module ["
|
||||
+ moduleClass.getSimpleName() + "]");
|
||||
}
|
||||
list.add(new OnModuleReference(moduleClass, method));
|
||||
}
|
||||
if (!list.isEmpty()) {
|
||||
onModuleReferences.put(plugin, list);
|
||||
}
|
||||
}
|
||||
this.onModuleReferences = Collections.unmodifiableMap(onModuleReferences);
|
||||
}
|
||||
|
||||
private static void logPluginInfo(final List<PluginInfo> pluginInfos, final String type, final Logger logger) {
|
||||
@ -222,38 +176,6 @@ public class PluginsService extends AbstractComponent {
|
||||
}
|
||||
}
|
||||
|
||||
private List<Tuple<PluginInfo, Plugin>> plugins() {
|
||||
return plugins;
|
||||
}
|
||||
|
||||
public void processModules(Iterable<Module> modules) {
|
||||
for (Module module : modules) {
|
||||
processModule(module);
|
||||
}
|
||||
}
|
||||
|
||||
public void processModule(Module module) {
|
||||
for (Tuple<PluginInfo, Plugin> plugin : plugins()) {
|
||||
// see if there are onModule references
|
||||
List<OnModuleReference> references = onModuleReferences.get(plugin.v2());
|
||||
if (references != null) {
|
||||
for (OnModuleReference reference : references) {
|
||||
if (reference.moduleClass.isAssignableFrom(module.getClass())) {
|
||||
try {
|
||||
reference.onModuleMethod.invoke(plugin.v2(), module);
|
||||
} catch (IllegalAccessException | InvocationTargetException e) {
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("plugin {}, failed to invoke custom onModule method", plugin.v1().getName()), e);
|
||||
throw new ElasticsearchException("failed to invoke onModule", e);
|
||||
} catch (Exception e) {
|
||||
logger.warn((Supplier<?>) () -> new ParameterizedMessage("plugin {}, failed to invoke custom onModule method", plugin.v1().getName()), e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public Settings updatedSettings() {
|
||||
Map<String, String> foundSettings = new HashMap<>();
|
||||
final Settings.Builder builder = Settings.builder();
|
||||
|
@ -45,8 +45,6 @@ public class RestClusterSearchShardsAction extends BaseRestHandler {
controller.registerHandler(POST, "/_search_shards", this);
controller.registerHandler(GET, "/{index}/_search_shards", this);
controller.registerHandler(POST, "/{index}/_search_shards", this);
controller.registerHandler(GET, "/{index}/{type}/_search_shards", this);
controller.registerHandler(POST, "/{index}/{type}/_search_shards", this);
}

@Override
@ -54,12 +52,9 @@ public class RestClusterSearchShardsAction extends BaseRestHandler {
String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
final ClusterSearchShardsRequest clusterSearchShardsRequest = Requests.clusterSearchShardsRequest(indices);
clusterSearchShardsRequest.local(request.paramAsBoolean("local", clusterSearchShardsRequest.local()));

clusterSearchShardsRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
clusterSearchShardsRequest.routing(request.param("routing"));
clusterSearchShardsRequest.preference(request.param("preference"));
clusterSearchShardsRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterSearchShardsRequest.indicesOptions()));

return channel -> client.admin().cluster().searchShards(clusterSearchShardsRequest, new RestToXContentListener<>(channel));
}
}
@ -91,11 +91,6 @@ public final class Script implements ToXContent, Writeable {
*/
public static final ParseField PARAMS_PARSE_FIELD = new ParseField("params");

/**
* Unreleased version used for {@link Script} non-null members format of read/write.
*/
public static final Version V_5_1_0_UNRELEASED = Version.fromId(5010099);

/**
* Helper class used by {@link ObjectParser} to store mutable {@link Script} variables and then
* construct an immutable {@link Script} object based on parsed XContent.
@ -382,7 +377,7 @@ public final class Script implements ToXContent, Writeable {
// Version 5.1+ requires all Script members to be non-null and supports the potential
// for more options than just XContentType. Reorders the read in contents to be in
// same order as the constructor.
if (in.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
if (in.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
this.type = ScriptType.readFrom(in);
this.lang = in.readString();
this.idOrCode = in.readString();
@ -434,7 +429,7 @@ public final class Script implements ToXContent, Writeable {
// Version 5.1+ requires all Script members to be non-null and supports the potential
// for more options than just XContentType. Reorders the written out contents to be in
// same order as the constructor.
if (out.getVersion().onOrAfter(V_5_1_0_UNRELEASED)) {
if (out.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
type.writeTo(out);
out.writeString(lang);
out.writeString(idOrCode);
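Both hunks above follow the same wire-version guard. A rough, self-contained sketch of the pattern; the class name, the default values, and the legacy fallback below are illustrative assumptions, not the real Script code:

import java.io.IOException;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.script.ScriptType;

// Sketch only: version-gated read/write of a simplified script-like value.
final class VersionedScript implements Writeable {
    private final ScriptType type;
    private final String lang;
    private final String idOrCode;

    VersionedScript(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
            // 5.1+ streams carry every member, in constructor order
            type = ScriptType.readFrom(in);
            lang = in.readString();
            idOrCode = in.readString();
        } else {
            // simplified stand-in for the legacy layout, not the actual pre-5.1 format
            type = ScriptType.INLINE;
            lang = "painless"; // assumed default
            idOrCode = in.readString();
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
            type.writeTo(out);
            out.writeString(lang);
            out.writeString(idOrCode);
        } else {
            out.writeString(idOrCode); // again a stand-in for the older format
        }
    }
}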
@ -32,17 +32,6 @@ import java.util.function.Function;

public class ScriptSettings {

static final String LEGACY_DEFAULT_LANG = "groovy";

/**
* The default script language to use for scripts that are stored in documents that have no script lang set explicitly.
* This setting is legacy setting and only applies for indices created on ES versions prior to version 5.0
*
* This constant will be removed in the next major release.
*/
@Deprecated
public static final String LEGACY_SCRIPT_SETTING = "script.legacy.default_lang";

private static final Map<ScriptType, Setting<Boolean>> SCRIPT_TYPE_SETTING_MAP;

static {
@ -58,7 +47,6 @@ public class ScriptSettings {

private final Map<ScriptContext, Setting<Boolean>> scriptContextSettingMap;
private final List<Setting<Boolean>> scriptLanguageSettings;
private final Setting<String> defaultLegacyScriptLanguageSetting;

public ScriptSettings(ScriptEngineRegistry scriptEngineRegistry, ScriptContextRegistry scriptContextRegistry) {
Map<ScriptContext, Setting<Boolean>> scriptContextSettingMap = contextSettings(scriptContextRegistry);
@ -66,13 +54,6 @@ public class ScriptSettings {

List<Setting<Boolean>> scriptLanguageSettings = languageSettings(SCRIPT_TYPE_SETTING_MAP, scriptContextSettingMap, scriptEngineRegistry, scriptContextRegistry);
this.scriptLanguageSettings = Collections.unmodifiableList(scriptLanguageSettings);

this.defaultLegacyScriptLanguageSetting = new Setting<>(LEGACY_SCRIPT_SETTING, LEGACY_DEFAULT_LANG, setting -> {
if (!LEGACY_DEFAULT_LANG.equals(setting) && !scriptEngineRegistry.getRegisteredLanguages().containsKey(setting)) {
throw new IllegalArgumentException("unregistered default language [" + setting + "]");
}
return setting;
}, Property.NodeScope);
}

private static Map<ScriptContext, Setting<Boolean>> contextSettings(ScriptContextRegistry scriptContextRegistry) {
@ -169,19 +150,10 @@ public class ScriptSettings {
settings.addAll(SCRIPT_TYPE_SETTING_MAP.values());
settings.addAll(scriptContextSettingMap.values());
settings.addAll(scriptLanguageSettings);
settings.add(defaultLegacyScriptLanguageSetting);
return settings;
}

public Iterable<Setting<Boolean>> getScriptLanguageSettings() {
return scriptLanguageSettings;
}

public Setting<String> getDefaultLegacyScriptLanguageSetting() {
return defaultLegacyScriptLanguageSetting;
}

public static String getLegacyDefaultLang(Settings settings) {
return settings.get(LEGACY_SCRIPT_SETTING, ScriptSettings.LEGACY_DEFAULT_LANG);
}
}
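The removed defaultLegacyScriptLanguageSetting above uses the Setting constructor that validates the value inside the parser function. A minimal sketch of the same idea, assuming the usual org.elasticsearch.common.settings and java.util imports; the key my.plugin.default_lang and the allowed set are made up for illustration:

// Sketch: a node-scoped setting whose value is checked while it is parsed.
private static final Set<String> ALLOWED_LANGS = new HashSet<>(Arrays.asList("painless", "expression"));

static final Setting<String> DEFAULT_LANG_SETTING =
    new Setting<>("my.plugin.default_lang", "painless", value -> {
        if (ALLOWED_LANGS.contains(value) == false) {
            throw new IllegalArgumentException("unregistered default language [" + value + "]");
        }
        return value;
    }, Property.NodeScope);

Reading the value is then DEFAULT_LANG_SETTING.get(settings), and a bad value fails fast when the node parses its settings rather than when a script later runs.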
@ -114,4 +114,10 @@ public class ShardFetchRequest extends TransportRequest {
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new SearchTask(id, type, action, getDescription(), parentTaskId);
}

@Override
public String getDescription() {
return "id[" + id + "], size[" + size + "], lastEmittedDoc[" + lastEmittedDoc + "]";
}

}
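ShardFetchRequest above, and InternalScrollSearchRequest, ShardSearchTransportRequest and QuerySearchRequest further down, all gain the same pair of overrides: createTask returns a SearchTask that carries getDescription(). A condensed sketch of that pattern with a hypothetical request class (DemoFetchRequest and its fields are assumptions, not code from this change):

import org.elasticsearch.action.search.SearchTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.transport.TransportRequest;

// Sketch: a transport request whose task shows up with a readable description
// in the task management API.
public class DemoFetchRequest extends TransportRequest {
    private long id;
    private int size;

    @Override
    public Task createTask(long id, String type, String action, TaskId parentTaskId) {
        return new SearchTask(id, type, action, getDescription(), parentTaskId);
    }

    @Override
    public String getDescription() {
        return "id[" + this.id + "], size[" + size + "]";
    }
}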
@ -36,7 +36,7 @@ import java.util.Objects;
* Represents a {@link QueryBuilder} and a list of alias names that filters the builder is composed of.
*/
public final class AliasFilter implements Writeable {
public static final Version V_5_1_0 = Version.fromId(5010099);

private final String[] aliases;
private final QueryBuilder filter;
private final boolean reparseAliases;
@ -49,7 +49,7 @@ public final class AliasFilter implements Writeable {

public AliasFilter(StreamInput input) throws IOException {
aliases = input.readStringArray();
if (input.getVersion().onOrAfter(V_5_1_0)) {
if (input.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
filter = input.readOptionalNamedWriteable(QueryBuilder.class);
reparseAliases = false;
} else {
@ -78,7 +78,7 @@ public final class AliasFilter implements Writeable {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringArray(aliases);
if (out.getVersion().onOrAfter(V_5_1_0)) {
if (out.getVersion().onOrAfter(Version.V_5_1_0_UNRELEASED)) {
out.writeOptionalNamedWriteable(filter);
}
}
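The AliasFilter constructor and writeTo above rely on the optional named-writeable helpers for the query. A tiny sketch of that read/write pair, assuming free-standing helper methods (the method names are illustrative):

// Sketch: a possibly-null QueryBuilder written as an optional named writeable.
static void writeFilter(StreamOutput out, QueryBuilder filter) throws IOException {
    // writes a presence flag, then the writeable's name and body when non-null
    out.writeOptionalNamedWriteable(filter);
}

static QueryBuilder readFilter(StreamInput in) throws IOException {
    // returns null when the flag was false
    return in.readOptionalNamedWriteable(QueryBuilder.class);
}

On the reading side the stream has to be a NamedWriteableAwareStreamInput backed by a registry that knows the query names, which is what the response serialization test added later in this change sets up.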
@ -75,4 +75,10 @@ public class InternalScrollSearchRequest extends TransportRequest {
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new SearchTask(id, type, action, getDescription(), parentTaskId);
}

@Override
public String getDescription() {
return "id[" + id + "], scroll[" + scroll + "]";
}

}
@ -21,7 +21,6 @@ package org.elasticsearch.search.internal;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@ -73,9 +72,9 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
ShardSearchLocalRequest() {
}

ShardSearchLocalRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards,
ShardSearchLocalRequest(SearchRequest searchRequest, ShardId shardId, int numberOfShards,
AliasFilter aliasFilter, long nowInMillis) {
this(shardRouting.shardId(), numberOfShards, searchRequest.searchType(),
this(shardId, numberOfShards, searchRequest.searchType(),
searchRequest.source(), searchRequest.types(), searchRequest.requestCache(), aliasFilter);
this.scroll = searchRequest.scroll();
this.nowInMillis = nowInMillis;
@ -25,7 +25,6 @@ import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchTask;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -54,9 +53,9 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
public ShardSearchTransportRequest(){
}

public ShardSearchTransportRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards,
public ShardSearchTransportRequest(SearchRequest searchRequest, ShardId shardId, int numberOfShards,
AliasFilter aliasFilter, long nowInMillis) {
this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardRouting, numberOfShards, aliasFilter, nowInMillis);
this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardId, numberOfShards, aliasFilter, nowInMillis);
this.originalIndices = new OriginalIndices(searchRequest);
}

@ -166,4 +165,10 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new SearchTask(id, type, action, getDescription(), parentTaskId);
}

@Override
public String getDescription() {
// Shard id is enough here, the request itself can be found by looking at the parent task description
return "shardId[" + shardSearchLocalRequest.shardId() + "]";
}
}
@ -24,6 +24,7 @@ import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchTask;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.dfs.AggregatedDfs;
@ -90,4 +91,16 @@ public class QuerySearchRequest extends TransportRequest implements IndicesReque
public Task createTask(long id, String type, String action, TaskId parentTaskId) {
return new SearchTask(id, type, action, getDescription(), parentTaskId);
}

public String getDescription() {
StringBuilder sb = new StringBuilder();
sb.append("id[");
sb.append(id);
sb.append("], ");
sb.append("indices[");
Strings.arrayToDelimitedString(originalIndices.indices(), ",", sb);
sb.append("]");
return sb.toString();
}

}
@ -871,7 +871,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
ShardId shardId = shardStatus.key;
ShardSnapshotStatus status = shardStatus.value;
if (status.state().failed()) {
failures.add(new ShardSearchFailure(status.reason(), new SearchShardTarget(status.nodeId(), shardId.getIndex(), shardId.id())));
failures.add(new ShardSearchFailure(status.reason(), new SearchShardTarget(status.nodeId(), shardId)));
shardFailures.add(new SnapshotShardFailure(status.nodeId(), shardId, status.reason()));
}
}
@ -20,7 +20,6 @@ package org.elasticsearch.transport;

import com.carrotsearch.hppc.IntHashSet;
import com.carrotsearch.hppc.IntSet;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
@ -715,7 +714,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
}

@Override
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException {
return parse(address, settings.get("transport.profiles.default.port", TransportSettings.PORT.get(settings)), perAddressLimit);
}
@ -29,6 +29,7 @@ import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;

import java.io.IOException;
import java.net.UnknownHostException;
import java.util.List;
import java.util.Map;

@ -53,7 +54,7 @@ public interface Transport extends LifecycleComponent {
/**
* Returns an address from its string representation.
*/
TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception;
TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException;

/**
* Is the address type supported.
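Narrowing the signature from throws Exception to throws UnknownHostException matches what host resolution can actually fail with. A rough sketch of a resolver with that shape; the parsing logic and helper name are assumptions, not the TcpTransport implementation, and IPv6 literals are ignored for brevity:

// Sketch: resolve "host" or "host:port" strings; only UnknownHostException escapes.
static TransportAddress[] resolveAddresses(String address, int defaultPort, int perAddressLimit)
        throws UnknownHostException {
    String host = address;
    int port = defaultPort;
    int colon = address.lastIndexOf(':');
    if (colon >= 0) {
        host = address.substring(0, colon);
        port = Integer.parseInt(address.substring(colon + 1));
    }
    InetAddress[] resolved = InetAddress.getAllByName(host); // throws UnknownHostException
    int limit = Math.min(resolved.length, perAddressLimit);
    TransportAddress[] result = new TransportAddress[limit];
    for (int i = 0; i < limit; i++) {
        result[i] = new TransportAddress(resolved[i], port);
    }
    return result;
}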
@ -52,6 +52,7 @@ import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
@ -546,6 +547,8 @@ public class TransportService extends AbstractLifecycleComponent {
holderToNotify.handler().handleException(sendRequestException);
}
});
} else {
logger.debug("Exception while sending request, handler likely already notified due to timeout", e);
}
}
}
@ -617,7 +620,7 @@ public class TransportService extends AbstractLifecycleComponent {
return requestIds.getAndIncrement();
}

public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException {
return transport.addressesFromString(address, perAddressLimit);
}
@ -842,9 +842,9 @@ public class ExceptionSerializationTests extends ESTestCase {
ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, "boom");
Version version = VersionUtils.randomVersionBetween(random(),
Version.V_5_0_0, Version.CURRENT);
if (version.before(ElasticsearchException.V_5_1_0_UNRELEASED)) {
// remove this once 5_1_0 is released randomVersionBetween asserts that this version is in the constant table..
version = ElasticsearchException.V_5_1_0_UNRELEASED;
if (version.before(Version.V_5_0_2_UNRELEASED)) {
// remove this once 5_0_2 is released randomVersionBetween asserts that this version is in the constant table..
version = Version.V_5_0_2_UNRELEASED;
}
ShardLockObtainFailedException ex = serialize(orig, version);
assertEquals(orig.getMessage(), ex.getMessage());
@ -854,7 +854,7 @@ public class ExceptionSerializationTests extends ESTestCase {
public void testBWCShardLockObtainFailedException() throws IOException {
ShardId shardId = new ShardId("foo", "_na_", 1);
ShardLockObtainFailedException orig = new ShardLockObtainFailedException(shardId, "boom");
Exception ex = serialize((Exception)orig, Version.V_5_0_0);
Exception ex = serialize((Exception)orig, randomFrom(Version.V_5_0_0, Version.V_5_0_1));
assertThat(ex, instanceOf(NotSerializableExceptionWrapper.class));
assertEquals("shard_lock_obtain_failed_exception: [foo][1]: boom", ex.getMessage());
}
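The serialize(...) helper used by these tests is part of the test class itself; mechanically it is a BytesStreamOutput/StreamInput pair pinned to the target wire version. A hedged sketch of that round trip (roundTrip is a made-up name, and the usual org.elasticsearch.common.io.stream imports are assumed):

// Sketch: write a Writeable at an older wire version and read it back the same way.
static <T extends Writeable> T roundTrip(T original, Writeable.Reader<T> reader, Version version) throws IOException {
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        out.setVersion(version);            // pretend we are talking to an older node
        original.writeTo(out);
        try (StreamInput in = out.bytes().streamInput()) {
            in.setVersion(version);         // and read with the same assumed version
            return reader.read(in);         // callers assert on the fields that survive that version
        }
    }
}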
@ -22,19 +22,16 @@ package org.elasticsearch;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.query.QueryStringQueryBuilder;
|
||||
import org.elasticsearch.index.query.SimpleQueryStringBuilder;
|
||||
import org.elasticsearch.monitor.os.OsStats;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.search.internal.AliasFilter;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
import org.hamcrest.Matchers;
|
||||
|
||||
import java.lang.reflect.Modifier;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.Version.V_2_2_0;
|
||||
import static org.elasticsearch.Version.V_5_0_0_alpha1;
|
||||
@ -133,11 +130,11 @@ public class VersionTests extends ESTestCase {
|
||||
assertThat(Version.V_5_0_0_alpha1.minimumCompatibilityVersion(), equalTo(Version.V_5_0_0_alpha1));
|
||||
// from 6.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is
|
||||
// released since we need to bump the supported minor in Version#minimumCompatibilityVersion()
|
||||
Version lastVersion = VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha1);
|
||||
assertEquals(lastVersion.major, Version.V_6_0_0_alpha1.minimumCompatibilityVersion().major);
|
||||
Version lastVersion = VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha1_UNRELEASED);
|
||||
assertEquals(lastVersion.major, Version.V_6_0_0_alpha1_UNRELEASED.minimumCompatibilityVersion().major);
|
||||
assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()",
|
||||
lastVersion.minor, Version.V_6_0_0_alpha1.minimumCompatibilityVersion().minor);
|
||||
assertEquals(0, Version.V_6_0_0_alpha1.minimumCompatibilityVersion().revision);
|
||||
lastVersion.minor, Version.V_6_0_0_alpha1_UNRELEASED.minimumCompatibilityVersion().minor);
|
||||
assertEquals(0, Version.V_6_0_0_alpha1_UNRELEASED.minimumCompatibilityVersion().revision);
|
||||
}
|
||||
|
||||
public void testToString() {
|
||||
@ -204,7 +201,7 @@ public class VersionTests extends ESTestCase {
|
||||
|
||||
public void testParseLenient() {
|
||||
// note this is just a silly sanity check, we test it in lucene
|
||||
for (Version version : VersionUtils.allVersions()) {
|
||||
for (Version version : VersionUtils.allReleasedVersions()) {
|
||||
org.apache.lucene.util.Version luceneVersion = version.luceneVersion;
|
||||
String string = luceneVersion.toString().toUpperCase(Locale.ROOT)
|
||||
.replaceFirst("^LUCENE_(\\d+)_(\\d+)$", "$1.$2");
|
||||
@ -213,20 +210,27 @@ public class VersionTests extends ESTestCase {
|
||||
}
|
||||
|
||||
public void testAllVersionsMatchId() throws Exception {
|
||||
final Set<Version> releasedVersions = new HashSet<>(VersionUtils.allReleasedVersions());
|
||||
final Set<Version> unreleasedVersions = new HashSet<>(VersionUtils.allUnreleasedVersions());
|
||||
Map<String, Version> maxBranchVersions = new HashMap<>();
|
||||
for (java.lang.reflect.Field field : Version.class.getFields()) {
|
||||
if (field.getName().endsWith("_ID")) {
|
||||
if (field.getName().matches("_ID(_UNRELEASED)?")) {
|
||||
assertTrue(field.getName() + " should be static", Modifier.isStatic(field.getModifiers()));
|
||||
assertTrue(field.getName() + " should be final", Modifier.isFinal(field.getModifiers()));
|
||||
int versionId = (Integer)field.get(Version.class);
|
||||
|
||||
String constantName = field.getName().substring(0, field.getName().length() - 3);
|
||||
String constantName = field.getName().substring(0, field.getName().indexOf("_ID"));
|
||||
java.lang.reflect.Field versionConstant = Version.class.getField(constantName);
|
||||
assertTrue(constantName + " should be static", Modifier.isStatic(versionConstant.getModifiers()));
|
||||
assertTrue(constantName + " should be final", Modifier.isFinal(versionConstant.getModifiers()));
|
||||
|
||||
Version v = (Version) versionConstant.get(Version.class);
|
||||
Version v = (Version) versionConstant.get(null);
|
||||
logger.debug("Checking {}", v);
|
||||
if (field.getName().endsWith("_UNRELEASED")) {
|
||||
assertTrue(unreleasedVersions.contains(v));
|
||||
} else {
|
||||
assertTrue(releasedVersions.contains(v));
|
||||
}
|
||||
assertEquals("Version id " + field.getName() + " does not point to " + constantName, v, Version.fromId(versionId));
|
||||
assertEquals("Version " + constantName + " does not have correct id", versionId, v.id);
|
||||
if (v.major >= 2) {
|
||||
@ -259,8 +263,8 @@ public class VersionTests extends ESTestCase {
|
||||
|
||||
// this test ensures we never bump the lucene version in a bugfix release
|
||||
public void testLuceneVersionIsSameOnMinorRelease() {
|
||||
for (Version version : VersionUtils.allVersions()) {
|
||||
for (Version other : VersionUtils.allVersions()) {
|
||||
for (Version version : VersionUtils.allReleasedVersions()) {
|
||||
for (Version other : VersionUtils.allReleasedVersions()) {
|
||||
if (other.onOrAfter(version)) {
|
||||
assertTrue("lucene versions must be " + other + " >= " + version,
|
||||
other.luceneVersion.onOrAfter(version.luceneVersion));
|
||||
@ -274,31 +278,16 @@ public class VersionTests extends ESTestCase {
|
||||
}
|
||||
}
|
||||
}
|
||||
private static final Version V_20_0_0_UNRELEASED = new Version(20000099, Version.CURRENT.luceneVersion);
|
||||
|
||||
// see comment in Version.java about this test
|
||||
public void testUnknownVersions() {
|
||||
assertUnknownVersion(V_20_0_0_UNRELEASED);
|
||||
expectThrows(AssertionError.class, () -> assertUnknownVersion(Version.CURRENT));
|
||||
assertUnknownVersion(AliasFilter.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant
|
||||
assertUnknownVersion(OsStats.V_5_1_0); // once we released 5.1.0 and it's added to Version.java we need to remove this constant
|
||||
assertUnknownVersion(QueryStringQueryBuilder.V_5_1_0_UNRELEASED);
|
||||
assertUnknownVersion(SimpleQueryStringBuilder.V_5_1_0_UNRELEASED);
|
||||
assertUnknownVersion(ElasticsearchException.V_5_1_0_UNRELEASED);
|
||||
// once we released 5.0.0 and it's added to Version.java we need to remove this constant
|
||||
assertUnknownVersion(Script.V_5_1_0_UNRELEASED);
|
||||
// once we released 5.0.0 and it's added to Version.java we need to remove this constant
|
||||
}
|
||||
|
||||
public static void assertUnknownVersion(Version version) {
assertFalse("Version " + version + " has been released don't use a new instance of this version",
VersionUtils.allVersions().contains(version));
VersionUtils.allReleasedVersions().contains(version));
}
|
||||
|
||||
public void testIsCompatible() {
|
||||
assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()));
|
||||
assertTrue(isCompatible(Version.V_5_0_0, Version.V_6_0_0_alpha1));
|
||||
assertFalse(isCompatible(Version.V_2_0_0, Version.V_6_0_0_alpha1));
|
||||
assertTrue(isCompatible(Version.V_5_0_0, Version.V_6_0_0_alpha1_UNRELEASED));
|
||||
assertFalse(isCompatible(Version.V_2_0_0, Version.V_6_0_0_alpha1_UNRELEASED));
|
||||
assertFalse(isCompatible(Version.V_2_0_0, Version.V_5_0_0));
|
||||
}
|
||||
|
||||
|
@ -38,13 +38,17 @@ import org.elasticsearch.action.fieldstats.FieldStatsAction;
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.action.index.IndexAction;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.action.search.SearchAction;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.search.SearchTransportService;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.action.support.replication.ReplicationResponse;
|
||||
import org.elasticsearch.action.support.replication.TransportReplicationActionTests;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
@ -82,14 +86,22 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
|
||||
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
|
||||
import static org.hamcrest.Matchers.allOf;
|
||||
import static org.hamcrest.Matchers.contains;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.emptyCollectionOf;
|
||||
import static org.hamcrest.Matchers.endsWith;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.lessThanOrEqualTo;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.startsWith;
|
||||
|
||||
/**
|
||||
* Integration tests for task management API
|
||||
@ -329,6 +341,50 @@ public class TasksIT extends ESIntegTestCase {
|
||||
assertParentTask(findEvents(BulkAction.NAME + "[s][r]", Tuple::v1), shardTask);
|
||||
}
|
||||
|
||||
|
||||
public void testSearchTaskDescriptions() {
|
||||
registerTaskManageListeners(SearchAction.NAME); // main task
|
||||
registerTaskManageListeners(SearchAction.NAME + "[*]"); // shard task
|
||||
createIndex("test");
|
||||
ensureGreen("test"); // Make sure all shards are allocated to catch replication tasks
|
||||
client().prepareIndex("test", "doc", "test_id").setSource("{\"foo\": \"bar\"}")
|
||||
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get();
|
||||
|
||||
assertSearchResponse(client().prepareSearch("test").setTypes("doc").setQuery(QueryBuilders.matchAllQuery()).get());
|
||||
|
||||
// the search operation should produce one main task
|
||||
List<TaskInfo> mainTask = findEvents(SearchAction.NAME, Tuple::v1);
|
||||
assertEquals(1, mainTask.size());
|
||||
assertThat(mainTask.get(0).getDescription(), startsWith("indices[test], types[doc], search_type["));
|
||||
assertThat(mainTask.get(0).getDescription(), containsString("\"query\":{\"match_all\""));
|
||||
|
||||
// check that if we have any shard-level requests they all have non-zero length description
|
||||
List<TaskInfo> shardTasks = findEvents(SearchAction.NAME + "[*]", Tuple::v1);
|
||||
for (TaskInfo taskInfo : shardTasks) {
|
||||
assertThat(taskInfo.getParentTaskId(), notNullValue());
|
||||
assertEquals(mainTask.get(0).getTaskId(), taskInfo.getParentTaskId());
|
||||
switch (taskInfo.getAction()) {
|
||||
case SearchTransportService.QUERY_ACTION_NAME:
|
||||
case SearchTransportService.QUERY_FETCH_ACTION_NAME:
|
||||
case SearchTransportService.DFS_ACTION_NAME:
|
||||
assertTrue(taskInfo.getDescription(), Regex.simpleMatch("shardId[[test][*]]", taskInfo.getDescription()));
|
||||
break;
|
||||
case SearchTransportService.QUERY_ID_ACTION_NAME:
|
||||
assertTrue(taskInfo.getDescription(), Regex.simpleMatch("id[*], indices[test]", taskInfo.getDescription()));
|
||||
break;
|
||||
case SearchTransportService.FETCH_ID_ACTION_NAME:
|
||||
assertTrue(taskInfo.getDescription(), Regex.simpleMatch("id[*], size[1], lastEmittedDoc[null]",
|
||||
taskInfo.getDescription()));
|
||||
break;
|
||||
default:
|
||||
fail("Unexpected action [" + taskInfo.getAction() + "] with description [" + taskInfo.getDescription() + "]");
|
||||
}
|
||||
// assert that all task descriptions have non-zero length
|
||||
assertThat(taskInfo.getDescription().length(), greaterThan(0));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Very basic "is it plugged in" style test that indexes a document and makes sure that you can fetch the status of the process. The
|
||||
* goal here is to verify that the large moving parts that make fetching task status work fit together rather than to verify any
|
||||
|
@ -0,0 +1,80 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.shards;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
|
||||
public class ClusterSearchShardsRequestTests extends ESTestCase {
|
||||
|
||||
public void testSerialization() throws Exception {
|
||||
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest();
|
||||
if (randomBoolean()) {
|
||||
int numIndices = randomIntBetween(1, 5);
|
||||
String[] indices = new String[numIndices];
|
||||
for (int i = 0; i < numIndices; i++) {
|
||||
indices[i] = randomAsciiOfLengthBetween(3, 10);
|
||||
}
|
||||
request.indices(indices);
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
request.indicesOptions(
|
||||
IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
request.preference(randomAsciiOfLengthBetween(3, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
int numRoutings = randomIntBetween(1, 3);
|
||||
String[] routings = new String[numRoutings];
|
||||
for (int i = 0; i < numRoutings; i++) {
|
||||
routings[i] = randomAsciiOfLengthBetween(3, 10);
|
||||
}
|
||||
request.routing(routings);
|
||||
}
|
||||
|
||||
Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT);
|
||||
try (BytesStreamOutput out = new BytesStreamOutput()) {
|
||||
out.setVersion(version);
|
||||
request.writeTo(out);
|
||||
try (StreamInput in = out.bytes().streamInput()) {
|
||||
in.setVersion(version);
|
||||
ClusterSearchShardsRequest deserialized = new ClusterSearchShardsRequest();
|
||||
deserialized.readFrom(in);
|
||||
assertArrayEquals(request.indices(), deserialized.indices());
|
||||
assertSame(request.indicesOptions(), deserialized.indicesOptions());
|
||||
assertEquals(request.routing(), deserialized.routing());
|
||||
assertEquals(request.preference(), deserialized.preference());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testIndicesMustNotBeNull() {
|
||||
ClusterSearchShardsRequest request = new ClusterSearchShardsRequest();
|
||||
assertNotNull(request.indices());
|
||||
expectThrows(NullPointerException.class, () -> request.indices((String[])null));
|
||||
expectThrows(NullPointerException.class, () -> request.indices((String)null));
|
||||
expectThrows(NullPointerException.class, () -> request.indices(new String[]{"index1", null, "index3"}));
|
||||
}
|
||||
}
|
@ -0,0 +1,105 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.shards;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.cluster.routing.TestShardRouting;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.index.query.RandomQueryBuilder;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.search.SearchModule;
|
||||
import org.elasticsearch.search.internal.AliasFilter;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
public class ClusterSearchShardsResponseTests extends ESTestCase {
|
||||
|
||||
public void testSerialization() throws Exception {
|
||||
Map<String, AliasFilter> indicesAndFilters = new HashMap<>();
|
||||
Set<DiscoveryNode> nodes = new HashSet<>();
|
||||
int numShards = randomIntBetween(1, 10);
|
||||
ClusterSearchShardsGroup[] clusterSearchShardsGroups = new ClusterSearchShardsGroup[numShards];
|
||||
for (int i = 0; i < numShards; i++) {
|
||||
String index = randomAsciiOfLengthBetween(3, 10);
|
||||
ShardId shardId = new ShardId(index, randomAsciiOfLength(12), i);
|
||||
String nodeId = randomAsciiOfLength(10);
|
||||
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, randomBoolean(), ShardRoutingState.STARTED);
|
||||
clusterSearchShardsGroups[i] = new ClusterSearchShardsGroup(shardId, new ShardRouting[]{shardRouting});
|
||||
DiscoveryNode node = new DiscoveryNode(shardRouting.currentNodeId(),
|
||||
new TransportAddress(TransportAddress.META_ADDRESS, randomInt(0xFFFF)), VersionUtils.randomVersion(random()));
|
||||
nodes.add(node);
|
||||
AliasFilter aliasFilter;
|
||||
if (randomBoolean()) {
|
||||
aliasFilter = new AliasFilter(RandomQueryBuilder.createQuery(random()), "alias-" + index);
|
||||
} else {
|
||||
aliasFilter = new AliasFilter(null, Strings.EMPTY_ARRAY);
|
||||
}
|
||||
indicesAndFilters.put(index, aliasFilter);
|
||||
}
|
||||
ClusterSearchShardsResponse clusterSearchShardsResponse = new ClusterSearchShardsResponse(clusterSearchShardsGroups,
|
||||
nodes.toArray(new DiscoveryNode[nodes.size()]), indicesAndFilters);
|
||||
|
||||
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
|
||||
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
|
||||
entries.addAll(searchModule.getNamedWriteables());
|
||||
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
|
||||
Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT);
|
||||
try(BytesStreamOutput out = new BytesStreamOutput()) {
|
||||
out.setVersion(version);
|
||||
clusterSearchShardsResponse.writeTo(out);
|
||||
try(StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry)) {
|
||||
in.setVersion(version);
|
||||
ClusterSearchShardsResponse deserialized = new ClusterSearchShardsResponse();
|
||||
deserialized.readFrom(in);
|
||||
assertArrayEquals(clusterSearchShardsResponse.getNodes(), deserialized.getNodes());
|
||||
assertEquals(clusterSearchShardsResponse.getGroups().length, deserialized.getGroups().length);
|
||||
for (int i = 0; i < clusterSearchShardsResponse.getGroups().length; i++) {
|
||||
ClusterSearchShardsGroup clusterSearchShardsGroup = clusterSearchShardsResponse.getGroups()[i];
|
||||
ClusterSearchShardsGroup deserializedGroup = deserialized.getGroups()[i];
|
||||
assertEquals(clusterSearchShardsGroup.getShardId(), deserializedGroup.getShardId());
|
||||
assertEquals(clusterSearchShardsGroup.getIndex(), deserializedGroup.getIndex());
|
||||
assertArrayEquals(clusterSearchShardsGroup.getShards(), deserializedGroup.getShards());
|
||||
}
|
||||
if (version.onOrAfter(Version.V_5_1_0_UNRELEASED)) {
|
||||
assertEquals(clusterSearchShardsResponse.getIndicesAndFilters(), deserialized.getIndicesAndFilters());
|
||||
} else {
|
||||
assertNull(deserialized.getIndicesAndFilters());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -185,7 +185,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
||||
|
||||
public void testAllVersionsTested() throws Exception {
|
||||
SortedSet<String> expectedVersions = new TreeSet<>();
|
||||
for (Version v : VersionUtils.allVersions()) {
|
||||
for (Version v : VersionUtils.allReleasedVersions()) {
|
||||
if (VersionUtils.isSnapshot(v)) continue; // snapshots are unreleased, so there is no backcompat yet
|
||||
if (v.isRelease() == false) continue; // no guarantees for prereleases
|
||||
if (v.onOrBefore(Version.V_2_0_0_beta1)) continue; // we can only test back one major lucene version
|
||||
|
@ -95,7 +95,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase {
|
||||
}
|
||||
|
||||
SortedSet<String> expectedVersions = new TreeSet<>();
|
||||
for (Version v : VersionUtils.allVersions()) {
|
||||
for (Version v : VersionUtils.allReleasedVersions()) {
|
||||
if (VersionUtils.isSnapshot(v)) continue; // snapshots are unreleased, so there is no backcompat yet
|
||||
if (v.isRelease() == false) continue; // no guarantees for prereleases
|
||||
if (v.onOrBefore(Version.V_2_0_0_beta1)) continue; // we can only test back one major lucene version
|
||||
|
@ -20,8 +20,13 @@
|
||||
package org.elasticsearch.client.transport;
|
||||
|
||||
import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.component.Lifecycle;
|
||||
import org.elasticsearch.common.component.LifecycleListener;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
@ -37,6 +42,7 @@ import org.elasticsearch.transport.TransportResponseHandler;
|
||||
import org.elasticsearch.transport.TransportServiceAdapter;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
@ -63,6 +69,8 @@ abstract class FailAndRetryMockTransport<Response extends TransportResponse> imp
|
||||
this.clusterName = clusterName;
|
||||
}
|
||||
|
||||
protected abstract ClusterState getMockClusterState(DiscoveryNode node);
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options)
|
||||
@ -70,9 +78,17 @@ abstract class FailAndRetryMockTransport<Response extends TransportResponse> imp
|
||||
|
||||
//we make sure that nodes get added to the connected ones when calling addTransportAddress, by returning proper nodes info
|
||||
if (connectMode) {
|
||||
TransportResponseHandler transportResponseHandler = transportServiceAdapter.onResponseReceived(requestId);
|
||||
transportResponseHandler.handleResponse(new LivenessResponse(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY),
|
||||
node));
|
||||
if (TransportLivenessAction.NAME.equals(action)) {
|
||||
TransportResponseHandler transportResponseHandler = transportServiceAdapter.onResponseReceived(requestId);
|
||||
transportResponseHandler.handleResponse(new LivenessResponse(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY),
|
||||
node));
|
||||
} else if (ClusterStateAction.NAME.equals(action)) {
|
||||
TransportResponseHandler transportResponseHandler = transportServiceAdapter.onResponseReceived(requestId);
|
||||
ClusterState clusterState = getMockClusterState(node);
|
||||
transportResponseHandler.handleResponse(new ClusterStateResponse(clusterName, clusterState));
|
||||
} else {
|
||||
throw new UnsupportedOperationException("Mock transport does not understand action " + action);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@ -133,7 +149,7 @@ abstract class FailAndRetryMockTransport<Response extends TransportResponse> imp
|
||||
}
|
||||
|
||||
@Override
|
||||
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
|
||||
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
|
@ -0,0 +1,78 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.transport;
|
||||
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.transport.MockTransportClient;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.hamcrest.Matchers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collector;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class NodeDisconnectIT extends ESIntegTestCase {
|
||||
|
||||
public void testNotifyOnDisconnect() throws IOException {
|
||||
internalCluster().ensureAtLeastNumDataNodes(2);
|
||||
|
||||
final Set<DiscoveryNode> disconnectedNodes = Collections.synchronizedSet(new HashSet<>());
|
||||
try (TransportClient client = new MockTransportClient(Settings.builder()
|
||||
.put("cluster.name", internalCluster().getClusterName()).build(), Collections.emptySet(), (n, e) -> disconnectedNodes.add(n))) {
|
||||
for (TransportService service : internalCluster().getInstances(TransportService.class)) {
|
||||
client.addTransportAddress(service.boundAddress().publishAddress());
|
||||
}
|
||||
internalCluster().stopRandomDataNode();
|
||||
for (int i = 0; i < 20; i++) { // fire up requests such that we hit the node and pass it to the listener
|
||||
client.admin().cluster().prepareState().get();
|
||||
}
|
||||
assertEquals(1, disconnectedNodes.size());
|
||||
}
|
||||
assertEquals(1, disconnectedNodes.size());
|
||||
}
|
||||
|
||||
public void testNotifyOnDisconnectInSniffer() throws IOException {
|
||||
internalCluster().ensureAtLeastNumDataNodes(2);
|
||||
|
||||
final Set<DiscoveryNode> disconnectedNodes = Collections.synchronizedSet(new HashSet<>());
|
||||
try (TransportClient client = new MockTransportClient(Settings.builder()
|
||||
.put("cluster.name", internalCluster().getClusterName()).build(), Collections.emptySet(), (n, e) -> disconnectedNodes.add(n))) {
|
||||
int numNodes = 0;
|
||||
for (TransportService service : internalCluster().getInstances(TransportService.class)) {
|
||||
numNodes++;
|
||||
client.addTransportAddress(service.boundAddress().publishAddress());
|
||||
}
|
||||
Set<TransportAddress> discoveryNodes = client.connectedNodes().stream().map(n -> n.getAddress()).collect(Collectors.toSet());
|
||||
assertEquals(numNodes, discoveryNodes.size());
|
||||
assertEquals(0, disconnectedNodes.size());
|
||||
internalCluster().stopRandomDataNode();
|
||||
client.getNodesService().doSample();
|
||||
assertEquals(1, disconnectedNodes.size());
|
||||
assertTrue(discoveryNodes.contains(disconnectedNodes.stream().findAny().get().getAddress()));
|
||||
}
|
||||
assertEquals(1, disconnectedNodes.size());
|
||||
}
|
||||
}
|
@ -19,37 +19,44 @@
|
||||
|
||||
package org.elasticsearch.client.transport;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportInterceptor;
|
||||
import org.elasticsearch.transport.TransportResponseHandler;
|
||||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
import org.elasticsearch.transport.TransportRequestOptions;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.node.liveness.LivenessResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportInterceptor;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
import org.elasticsearch.transport.TransportRequestOptions;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportResponseHandler;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.hamcrest.CustomMatcher;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.CoreMatchers.everyItem;
|
||||
import static org.hamcrest.CoreMatchers.hasItem;
|
||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
||||
import static org.hamcrest.CoreMatchers.not;
|
||||
import static org.hamcrest.CoreMatchers.nullValue;
|
||||
import static org.hamcrest.CoreMatchers.startsWith;
|
||||
import static org.hamcrest.Matchers.lessThanOrEqualTo;
|
||||
@ -62,14 +69,44 @@ public class TransportClientNodesServiceTests extends ESTestCase {
|
||||
private final FailAndRetryMockTransport<TestResponse> transport;
|
||||
private final TransportService transportService;
|
||||
private final TransportClientNodesService transportClientNodesService;
|
||||
private final int nodesCount;
|
||||
private final int listNodesCount;
|
||||
private final int sniffNodesCount;
|
||||
private TransportAddress livenessAddress = buildNewFakeTransportAddress();
|
||||
public Set<TransportAddress> nodeAddresses = new HashSet<>();
|
||||
final List<TransportAddress> listNodeAddresses;
|
||||
// map for each address of the nodes a cluster state request should respond with
|
||||
final Map<TransportAddress, DiscoveryNodes> nodeMap;
|
||||
|
||||
|
||||
TestIteration() {
|
||||
Settings settings = Settings.builder().put("cluster.name", "test").build();
|
||||
TestIteration(Object... extraSettings) {
|
||||
Settings settings = Settings.builder().put(extraSettings).put("cluster.name", "test").build();
|
||||
ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
|
||||
List<TransportAddress> listNodes = new ArrayList<>();
|
||||
Map<TransportAddress, DiscoveryNodes> nodeMap = new HashMap<>();
|
||||
this.listNodesCount = randomIntBetween(1, 10);
|
||||
int sniffNodesCount = 0;
|
||||
for (int i = 0; i < listNodesCount; i++) {
|
||||
TransportAddress transportAddress = buildNewFakeTransportAddress();
|
||||
listNodes.add(transportAddress);
|
||||
DiscoveryNodes.Builder discoNodes = DiscoveryNodes.builder();
|
||||
discoNodes.add(new DiscoveryNode("#list-node#-" + transportAddress, transportAddress, Version.CURRENT));
|
||||
|
||||
if (TransportClient.CLIENT_TRANSPORT_SNIFF.get(settings)) {
|
||||
final int numSniffNodes = randomIntBetween(0, 3);
|
||||
for (int j = 0; j < numSniffNodes; ++j) {
|
||||
TransportAddress sniffAddress = buildNewFakeTransportAddress();
|
||||
DiscoveryNode sniffNode = new DiscoveryNode("#sniff-node#-" + sniffAddress, sniffAddress, Version.CURRENT);
|
||||
discoNodes.add(sniffNode);
|
||||
// also allow sniffing of the sniff node itself
|
||||
nodeMap.put(sniffAddress, DiscoveryNodes.builder().add(sniffNode).build());
|
||||
++sniffNodesCount;
|
||||
}
|
||||
}
|
||||
nodeMap.put(transportAddress, discoNodes.build());
|
||||
}
|
||||
listNodeAddresses = listNodes;
|
||||
this.nodeMap = nodeMap;
|
||||
this.sniffNodesCount = sniffNodesCount;
|
||||
|
||||
threadPool = new TestThreadPool("transport-client-nodes-service-tests");
|
||||
transport = new FailAndRetryMockTransport<TestResponse>(random(), clusterName) {
|
||||
@Override
|
||||
@ -79,7 +116,12 @@ public class TransportClientNodesServiceTests extends ESTestCase {
|
||||
|
||||
@Override
|
||||
protected TestResponse newResponse() {
|
||||
return new TestResponse();
|
||||
return new TestResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ClusterState getMockClusterState(DiscoveryNode node) {
|
||||
return ClusterState.builder(clusterName).nodes(TestIteration.this.nodeMap.get(node.getAddress())).build();
|
||||
}
|
||||
};
|
||||
transportService = new TransportService(settings, transport, threadPool, new TransportInterceptor() {
|
||||
@ -101,14 +143,8 @@ public class TransportClientNodesServiceTests extends ESTestCase {
|
||||
transportService.start();
|
||||
transportService.acceptIncomingRequests();
|
||||
transportClientNodesService =
|
||||
new TransportClientNodesService(settings, transportService, threadPool);
|
||||
this.nodesCount = randomIntBetween(1, 10);
|
||||
for (int i = 0; i < nodesCount; i++) {
|
||||
TransportAddress transportAddress = buildNewFakeTransportAddress();
|
||||
nodeAddresses.add(transportAddress);
|
||||
transportClientNodesService.addTransportAddresses(transportAddress);
|
||||
}
|
||||
transport.endConnectMode();
|
||||
new TransportClientNodesService(settings, transportService, threadPool, (a, b) -> {});
|
||||
transportClientNodesService.addTransportAddresses(listNodeAddresses.toArray(new TransportAddress[0]));
|
||||
}
|
||||
|
||||
private <T extends TransportResponse> TransportResponseHandler wrapLivenessResponseHandler(TransportResponseHandler<T> handler,
|
||||
@ -145,7 +181,7 @@ public class TransportClientNodesServiceTests extends ESTestCase {
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
|
||||
transport.endConnectMode();
|
||||
transportService.stop();
|
||||
transportClientNodesService.close();
|
||||
try {
|
||||
@ -160,6 +196,7 @@ public class TransportClientNodesServiceTests extends ESTestCase {
|
||||
int iters = iterations(10, 100);
|
||||
for (int i = 0; i <iters; i++) {
|
||||
try(final TestIteration iteration = new TestIteration()) {
|
||||
iteration.transport.endConnectMode(); // stop transport from responding early
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
final AtomicInteger finalFailures = new AtomicInteger();
|
||||
final AtomicReference<Throwable> finalFailure = new AtomicReference<>();
|
||||
@ -230,7 +267,7 @@ public class TransportClientNodesServiceTests extends ESTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
assertThat(iteration.transport.triedNodes().size(), lessThanOrEqualTo(iteration.nodesCount));
|
||||
assertThat(iteration.transport.triedNodes().size(), lessThanOrEqualTo(iteration.listNodesCount));
|
||||
assertThat(iteration.transport.triedNodes().size(), equalTo(iteration.transport.connectTransportExceptions() +
|
||||
iteration.transport.failures() + iteration.transport.successes()));
|
||||
}
|
||||
@ -241,17 +278,42 @@ public class TransportClientNodesServiceTests extends ESTestCase {
|
||||
int iters = iterations(10, 100);
|
||||
for (int i = 0; i <iters; i++) {
|
||||
try(final TestIteration iteration = new TestIteration()) {
|
||||
assertThat(iteration.transportClientNodesService.connectedNodes().size(), lessThanOrEqualTo(iteration.nodesCount));
|
||||
assertThat(iteration.transportClientNodesService.connectedNodes().size(), lessThanOrEqualTo(iteration.listNodesCount));
|
||||
for (DiscoveryNode discoveryNode : iteration.transportClientNodesService.connectedNodes()) {
|
||||
assertThat(discoveryNode.getHostName(), startsWith("liveness-"));
|
||||
assertThat(discoveryNode.getHostAddress(), startsWith("liveness-"));
|
||||
assertNotEquals(discoveryNode.getAddress(), iteration.livenessAddress);
|
||||
assertThat(iteration.nodeAddresses, hasItem(discoveryNode.getAddress()));
|
||||
assertThat(iteration.listNodeAddresses, hasItem(discoveryNode.getAddress()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testRemoveAddressSniff() {
|
||||
checkRemoveAddress(true);
|
||||
}
|
||||
|
||||
public void testRemoveAddressSimple() {
|
||||
checkRemoveAddress(false);
|
||||
}
|
||||
|
||||
private void checkRemoveAddress(boolean sniff) {
|
||||
Object[] extraSettings = {TransportClient.CLIENT_TRANSPORT_SNIFF.getKey(), sniff};
|
||||
try(final TestIteration iteration = new TestIteration(extraSettings)) {
|
||||
final TransportClientNodesService service = iteration.transportClientNodesService;
|
||||
assertEquals(iteration.listNodesCount + iteration.sniffNodesCount, service.connectedNodes().size());
|
||||
final TransportAddress addressToRemove = randomFrom(iteration.listNodeAddresses);
|
||||
service.removeTransportAddress(addressToRemove);
|
||||
assertThat(service.connectedNodes(), everyItem(not(new CustomMatcher<DiscoveryNode>("removed address") {
|
||||
@Override
|
||||
public boolean matches(Object item) {
|
||||
return item instanceof DiscoveryNode && ((DiscoveryNode)item).getAddress().equals(addressToRemove);
|
||||
}
|
||||
})));
|
||||
assertEquals(iteration.listNodesCount + iteration.sniffNodesCount - 1, service.connectedNodes().size());
|
||||
}
|
||||
}
|
||||
|
||||
public static class TestRequest extends TransportRequest {
|
||||
|
||||
}
|
||||
|
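A minimal, self-contained sketch of the org.hamcrest.CustomMatcher pattern used by checkRemoveAddress above, assuming only plain Hamcrest and JDK types; the class name and sample addresses are illustrative and not part of this commit:

import static org.hamcrest.CoreMatchers.everyItem;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.MatcherAssert.assertThat;

import java.util.Arrays;
import java.util.List;

import org.hamcrest.CustomMatcher;

public class RemovedAddressMatcherSketch {
    public static void main(String[] args) {
        final String removed = "10.0.0.3:9300";
        final List<String> connected = Arrays.asList("10.0.0.1:9300", "10.0.0.2:9300");

        // everyItem(not(matcher)) asserts that no remaining entry matches the
        // removed address, mirroring the assertion in checkRemoveAddress.
        assertThat(connected, everyItem(not(new CustomMatcher<String>("removed address") {
            @Override
            public boolean matches(Object item) {
                return removed.equals(item);
            }
        })));
    }
}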
@@ -42,6 +42,7 @@ import org.junit.AfterClass;
import org.junit.Before;

import java.io.IOException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -188,7 +189,7 @@ public class NodeConnectionsServiceTests extends ESTestCase {
}

@Override
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException {
return new TransportAddress[0];
}

@@ -55,7 +55,7 @@ public class RoutingBackwardCompatibilityTests extends ESTestCase {

OperationRouting operationRouting = new OperationRouting(Settings.EMPTY, new ClusterSettings(Settings.EMPTY,
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
for (Version version : VersionUtils.allVersions()) {
for (Version version : VersionUtils.allReleasedVersions()) {
if (version.onOrAfter(Version.V_2_0_0) == false) {
// unsupported version, no need to test
continue;

@@ -48,7 +48,7 @@ public class CodecTests extends ESSingleNodeTestCase {

public void testAcceptPostingsFormat() throws IOException {
int i = 0;
for (Version v : VersionUtils.allVersions()) {
for (Version v : VersionUtils.allReleasedVersions()) {
if (v.onOrAfter(Version.V_2_0_0) == false) {
// no need to test, we don't support upgrading from these versions
continue;
@@ -82,7 +82,7 @@ public class CodecTests extends ESSingleNodeTestCase {

public void testAcceptDocValuesFormat() throws IOException {
int i = 0;
for (Version v : VersionUtils.allVersions()) {
for (Version v : VersionUtils.allReleasedVersions()) {
if (v.onOrAfter(Version.V_2_0_0) == false) {
// no need to test, we don't support upgrading from these versions
continue;

@@ -706,6 +706,43 @@ public class CacheTests extends ESTestCase {
barrier.await();
}

public void testExceptionThrownDuringConcurrentComputeIfAbsent() throws BrokenBarrierException, InterruptedException {
int numberOfThreads = randomIntBetween(2, 32);
final Cache<String, String> cache = CacheBuilder.<String, String>builder().build();

CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);

final String key = randomAsciiOfLengthBetween(2, 32);
for (int i = 0; i < numberOfThreads; i++) {
Thread thread = new Thread(() -> {
try {
barrier.await();
for (int j = 0; j < numberOfEntries; j++) {
try {
String value = cache.computeIfAbsent(key, k -> {
throw new RuntimeException("failed to load");
});
fail("expected exception but got: " + value);
} catch (ExecutionException e) {
assertNotNull(e.getCause());
assertThat(e.getCause(), instanceOf(RuntimeException.class));
assertEquals(e.getCause().getMessage(), "failed to load");
}
}
barrier.await();
} catch (BrokenBarrierException | InterruptedException e) {
throw new AssertionError(e);
}
});
thread.start();
}

// wait for all threads to be ready
barrier.await();
// wait for all threads to finish
barrier.await();
}

// test that the cache is not corrupted under lots of concurrent modifications, even hitting the same key
// here be dragons: this test did catch one subtle bug during development; do not remove lightly
public void testTorture() throws BrokenBarrierException, InterruptedException {

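A stripped-down sketch of the barrier choreography the computeIfAbsent test above depends on: the main thread and N workers rendezvous on a CyclicBarrier with 1 + numberOfThreads parties once before the contended section and once after it. Only JDK types are used; the class name and fixed thread count are illustrative, not part of the change.

import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CyclicBarrier;

public class BarrierChoreographySketch {
    public static void main(String[] args) throws BrokenBarrierException, InterruptedException {
        final int numberOfThreads = 8; // illustrative value
        final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
        final ConcurrentHashMap<String, String> map = new ConcurrentHashMap<>();

        for (int i = 0; i < numberOfThreads; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    barrier.await(); // wait until every worker is ready
                    map.computeIfAbsent("key", k -> "computed-by-" + id);
                    barrier.await(); // signal that this worker is finished
                } catch (BrokenBarrierException | InterruptedException e) {
                    throw new AssertionError(e);
                }
            }).start();
        }

        barrier.await(); // release the workers
        barrier.await(); // wait for the workers to finish
        System.out.println("single winner: " + map.get("key"));
    }
}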
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.discovery.zen;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
@ -34,53 +35,121 @@ import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.MockTcpTransport;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
import org.elasticsearch.transport.TransportConnectionListener;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.transport.TransportSettings;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.mockito.Matchers;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.Stack;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.emptySet;
|
||||
import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.mockito.Matchers.eq;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.verifyNoMoreInteractions;
|
||||
|
||||
public class UnicastZenPingTests extends ESTestCase {
|
||||
|
||||
private ThreadPool threadPool;
|
||||
private ExecutorService executorService;
|
||||
// close in reverse order as opened
|
||||
private Stack<Closeable> closeables;
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
threadPool = new TestThreadPool(getClass().getName());
|
||||
final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory("[" + getClass().getName() + "]");
|
||||
executorService =
|
||||
EsExecutors.newScaling(getClass().getName(), 0, 2, 60, TimeUnit.SECONDS, threadFactory, threadPool.getThreadContext());
|
||||
closeables = new Stack<>();
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
try {
|
||||
// JDK stack is broken, it does not iterate in the expected order (http://bugs.java.com/bugdatabase/view_bug.do?bug_id=4475301)
|
||||
final List<Closeable> reverse = new ArrayList<>();
|
||||
while (!closeables.isEmpty()) {
|
||||
reverse.add(closeables.pop());
|
||||
}
|
||||
IOUtils.close(reverse);
|
||||
} finally {
|
||||
terminate(executorService);
|
||||
terminate(threadPool);
|
||||
super.tearDown();
|
||||
}
|
||||
}
|
||||
|
||||
private static final UnicastHostsProvider EMPTY_HOSTS_PROVIDER = Collections::emptyList;
|
||||
|
||||
public void testSimplePings() throws IOException, InterruptedException {
|
||||
int startPort = 11000 + randomIntBetween(0, 1000);
|
||||
int endPort = startPort + 10;
|
||||
Settings settings = Settings.builder()
|
||||
.put("cluster.name", "test")
|
||||
.put(TransportSettings.PORT.getKey(), startPort + "-" + endPort).build();
|
||||
// use ephemeral ports
|
||||
final Settings settings = Settings.builder().put("cluster.name", "test").put(TransportSettings.PORT.getKey(), 0).build();
|
||||
final Settings settingsMismatch =
|
||||
Settings.builder().put(settings).put("cluster.name", "mismatch").put(TransportSettings.PORT.getKey(), 0).build();
|
||||
|
||||
Settings settingsMismatch = Settings.builder().put(settings)
|
||||
.put("cluster.name", "mismatch")
|
||||
.put(TransportSettings.PORT.getKey(), startPort + "-" + endPort).build();
|
||||
|
||||
ThreadPool threadPool = new TestThreadPool(getClass().getName());
|
||||
NetworkService networkService = new NetworkService(settings, Collections.emptyList());
|
||||
|
||||
NetworkHandle handleA = startServices(settings, threadPool, networkService, "UZP_A", Version.CURRENT);
|
||||
NetworkHandle handleB = startServices(settings, threadPool, networkService, "UZP_B", Version.CURRENT);
|
||||
NetworkHandle handleC = startServices(settingsMismatch, threadPool, networkService, "UZP_C", Version.CURRENT);
|
||||
final BiFunction<Settings, Version, Transport> supplier = (s, v) -> new MockTcpTransport(
|
||||
s,
|
||||
threadPool,
|
||||
BigArrays.NON_RECYCLING_INSTANCE,
|
||||
new NoneCircuitBreakerService(),
|
||||
new NamedWriteableRegistry(Collections.emptyList()),
|
||||
networkService,
|
||||
v);
|
||||
|
||||
NetworkHandle handleA = startServices(settings, threadPool, "UZP_A", Version.CURRENT, supplier);
|
||||
closeables.push(handleA.transportService);
|
||||
NetworkHandle handleB = startServices(settings, threadPool, "UZP_B", Version.CURRENT, supplier);
|
||||
closeables.push(handleB.transportService);
|
||||
NetworkHandle handleC = startServices(settingsMismatch, threadPool, "UZP_C", Version.CURRENT, supplier);
|
||||
closeables.push(handleC.transportService);
|
||||
// just fake that no versions are compatible with this node
|
||||
Version previousVersion = VersionUtils.getPreviousVersion(Version.CURRENT.minimumCompatibilityVersion());
|
||||
Version versionD = VersionUtils.randomVersionBetween(random(), previousVersion.minimumCompatibilityVersion(), previousVersion);
|
||||
NetworkHandle handleD = startServices(settingsMismatch, threadPool, networkService, "UZP_D", versionD);
|
||||
NetworkHandle handleD = startServices(settingsMismatch, threadPool, "UZP_D", versionD, supplier);
|
||||
closeables.push(handleD.transportService);
|
||||
|
||||
final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomPositiveLong()).build();
|
||||
|
||||
@ -106,6 +175,7 @@ public class UnicastZenPingTests extends ESTestCase {
|
||||
return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
|
||||
}
|
||||
});
|
||||
closeables.push(zenPingA);
|
||||
|
||||
UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, EMPTY_HOSTS_PROVIDER);
|
||||
zenPingB.start(new PingContextProvider() {
|
||||
@ -119,6 +189,7 @@ public class UnicastZenPingTests extends ESTestCase {
|
||||
return state;
|
||||
}
|
||||
});
|
||||
closeables.push(zenPingB);
|
||||
|
||||
UnicastZenPing zenPingC = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleC.transportService, EMPTY_HOSTS_PROVIDER) {
|
||||
@Override
|
||||
@ -137,6 +208,7 @@ public class UnicastZenPingTests extends ESTestCase {
|
||||
return state;
|
||||
}
|
||||
});
|
||||
closeables.push(zenPingC);
|
||||
|
||||
UnicastZenPing zenPingD = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleD.transportService, EMPTY_HOSTS_PROVIDER);
|
||||
zenPingD.start(new PingContextProvider() {
|
||||
@ -150,42 +222,319 @@ public class UnicastZenPingTests extends ESTestCase {
|
||||
return state;
|
||||
}
|
||||
});
|
||||
closeables.push(zenPingD);
|
||||
|
||||
try {
|
||||
logger.info("ping from UZP_A");
|
||||
Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
|
||||
logger.info("ping from UZP_A");
|
||||
Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1));
|
||||
assertThat(pingResponses.size(), equalTo(1));
|
||||
ZenPing.PingResponse ping = pingResponses.iterator().next();
|
||||
assertThat(ping.node().getId(), equalTo("UZP_B"));
|
||||
assertThat(ping.getClusterStateVersion(), equalTo(state.version()));
|
||||
assertCounters(handleA, handleA, handleB, handleC, handleD);
|
||||
|
||||
// ping again, this time from B,
|
||||
logger.info("ping from UZP_B");
|
||||
pingResponses = zenPingB.pingAndWait(TimeValue.timeValueSeconds(1));
|
||||
assertThat(pingResponses.size(), equalTo(1));
|
||||
ping = pingResponses.iterator().next();
|
||||
assertThat(ping.node().getId(), equalTo("UZP_A"));
|
||||
assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION));
|
||||
assertCounters(handleB, handleA, handleB, handleC, handleD);
|
||||
|
||||
logger.info("ping from UZP_C");
|
||||
pingResponses = zenPingC.pingAndWait(TimeValue.timeValueSeconds(1));
|
||||
assertThat(pingResponses.size(), equalTo(0));
|
||||
assertCounters(handleC, handleA, handleB, handleC, handleD);
|
||||
|
||||
logger.info("ping from UZP_D");
|
||||
pingResponses = zenPingD.pingAndWait(TimeValue.timeValueSeconds(1));
|
||||
assertThat(pingResponses.size(), equalTo(0));
|
||||
assertCounters(handleD, handleA, handleB, handleC, handleD);
|
||||
}
|
||||
|
||||
public void testUnknownHostNotCached() {
|
||||
// use ephemeral ports
|
||||
final Settings settings = Settings.builder().put("cluster.name", "test").put(TransportSettings.PORT.getKey(), 0).build();
|
||||
|
||||
final NetworkService networkService = new NetworkService(settings, Collections.emptyList());
|
||||
|
||||
final Map<String, TransportAddress[]> addresses = new HashMap<>();
|
||||
final BiFunction<Settings, Version, Transport> supplier = (s, v) -> new MockTcpTransport(
|
||||
s,
|
||||
threadPool,
|
||||
BigArrays.NON_RECYCLING_INSTANCE,
|
||||
new NoneCircuitBreakerService(),
|
||||
new NamedWriteableRegistry(Collections.emptyList()),
|
||||
networkService,
|
||||
v) {
|
||||
@Override
|
||||
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException {
|
||||
final TransportAddress[] transportAddresses = addresses.get(address);
|
||||
if (transportAddresses == null) {
|
||||
throw new UnknownHostException(address);
|
||||
} else {
|
||||
return transportAddresses;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
final NetworkHandle handleA = startServices(settings, threadPool, "UZP_A", Version.CURRENT, supplier);
|
||||
closeables.push(handleA.transportService);
|
||||
final NetworkHandle handleB = startServices(settings, threadPool, "UZP_B", Version.CURRENT, supplier);
|
||||
closeables.push(handleB.transportService);
|
||||
final NetworkHandle handleC = startServices(settings, threadPool, "UZP_C", Version.CURRENT, supplier);
|
||||
closeables.push(handleC.transportService);
|
||||
|
||||
addresses.put(
|
||||
"UZP_A",
|
||||
new TransportAddress[]{
|
||||
new TransportAddress(
|
||||
new InetSocketAddress(handleA.address.address().getAddress(), handleA.address.address().getPort()))});
|
||||
addresses.put(
|
||||
"UZP_C",
|
||||
new TransportAddress[]{
|
||||
new TransportAddress(
|
||||
new InetSocketAddress(handleC.address.address().getAddress(), handleC.address.address().getPort()))});
|
||||
|
||||
final Settings hostsSettings = Settings.builder()
|
||||
.putArray("discovery.zen.ping.unicast.hosts", "UZP_A", "UZP_B", "UZP_C")
|
||||
.put("cluster.name", "test")
|
||||
.build();
|
||||
|
||||
final ClusterState state = ClusterState.builder(new ClusterName("test")).version(randomPositiveLong()).build();
|
||||
|
||||
final UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, handleA.transportService, EMPTY_HOSTS_PROVIDER);
|
||||
zenPingA.start(new PingContextProvider() {
|
||||
@Override
|
||||
public DiscoveryNodes nodes() {
|
||||
return DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A").build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState clusterState() {
|
||||
return ClusterState.builder(state).blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)).build();
|
||||
}
|
||||
});
|
||||
closeables.push(zenPingA);
|
||||
|
||||
UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, EMPTY_HOSTS_PROVIDER);
|
||||
zenPingB.start(new PingContextProvider() {
|
||||
@Override
|
||||
public DiscoveryNodes nodes() {
|
||||
return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState clusterState() {
|
||||
return state;
|
||||
}
|
||||
});
|
||||
closeables.push(zenPingB);
|
||||
|
||||
UnicastZenPing zenPingC = new UnicastZenPing(hostsSettings, threadPool, handleC.transportService, EMPTY_HOSTS_PROVIDER);
|
||||
zenPingC.start(new PingContextProvider() {
|
||||
@Override
|
||||
public DiscoveryNodes nodes() {
|
||||
return DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C").build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState clusterState() {
|
||||
return state;
|
||||
}
|
||||
});
|
||||
closeables.push(zenPingC);
|
||||
|
||||
// the presence of an unresolvable host should not prevent resolvable hosts from being pinged
|
||||
{
|
||||
final Collection<ZenPing.PingResponse> pingResponses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(3));
|
||||
assertThat(pingResponses.size(), equalTo(1));
|
||||
ZenPing.PingResponse ping = pingResponses.iterator().next();
|
||||
assertThat(ping.node().getId(), equalTo("UZP_B"));
|
||||
assertThat(ping.node().getId(), equalTo("UZP_C"));
|
||||
assertThat(ping.getClusterStateVersion(), equalTo(state.version()));
|
||||
assertCounters(handleA, handleA, handleB, handleC, handleD);
|
||||
|
||||
// ping again, this time from B,
|
||||
logger.info("ping from UZP_B");
|
||||
pingResponses = zenPingB.pingAndWait(TimeValue.timeValueSeconds(1));
|
||||
assertThat(pingResponses.size(), equalTo(1));
|
||||
ping = pingResponses.iterator().next();
|
||||
assertThat(ping.node().getId(), equalTo("UZP_A"));
|
||||
assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION));
|
||||
assertCounters(handleB, handleA, handleB, handleC, handleD);
|
||||
|
||||
logger.info("ping from UZP_C");
|
||||
pingResponses = zenPingC.pingAndWait(TimeValue.timeValueSeconds(1));
|
||||
assertThat(pingResponses.size(), equalTo(0));
|
||||
assertCounters(handleC, handleA, handleB, handleC, handleD);
|
||||
|
||||
logger.info("ping from UZP_D");
|
||||
pingResponses = zenPingD.pingAndWait(TimeValue.timeValueSeconds(1));
|
||||
assertThat(pingResponses.size(), equalTo(0));
|
||||
assertCounters(handleD, handleA, handleB, handleC, handleD);
|
||||
} finally {
|
||||
try {
|
||||
IOUtils.close(zenPingA, zenPingB, zenPingC, zenPingD,
|
||||
handleA.transportService, handleB.transportService, handleC.transportService, handleD.transportService);
|
||||
} finally {
|
||||
terminate(threadPool);
|
||||
}
|
||||
assertCounters(handleA, handleA, handleC);
|
||||
assertNull(handleA.counters.get(handleB.address));
|
||||
}
|
||||
|
||||
// now allow UZP_B to be resolvable
|
||||
addresses.put(
|
||||
"UZP_B",
|
||||
new TransportAddress[]{
|
||||
new TransportAddress(
|
||||
new InetSocketAddress(handleB.address.address().getAddress(), handleB.address.address().getPort()))});
|
||||
|
||||
// now we should see pings to UZP_B; this establishes that host resolutions are not cached
|
||||
{
|
||||
// ping from C so that we can assert on the counters from a fresh source (as opposed to resetting them)
|
||||
final Collection<ZenPing.PingResponse> secondPingResponses = zenPingC.pingAndWait(TimeValue.timeValueSeconds(3));
|
||||
assertThat(secondPingResponses.size(), equalTo(2));
|
||||
final Set<String> ids = new HashSet<>(secondPingResponses.stream().map(p -> p.node().getId()).collect(Collectors.toList()));
|
||||
assertThat(ids, equalTo(new HashSet<>(Arrays.asList("UZP_A", "UZP_B"))));
|
||||
assertCounters(handleC, handleA, handleB, handleC);
|
||||
}
|
||||
}
|
||||
|
||||
public void testPortLimit() throws InterruptedException {
|
||||
final NetworkService networkService = new NetworkService(Settings.EMPTY, Collections.emptyList());
|
||||
final Transport transport = new MockTcpTransport(
|
||||
Settings.EMPTY,
|
||||
threadPool,
|
||||
BigArrays.NON_RECYCLING_INSTANCE,
|
||||
new NoneCircuitBreakerService(),
|
||||
new NamedWriteableRegistry(Collections.emptyList()),
|
||||
networkService,
|
||||
Version.CURRENT);
|
||||
closeables.push(transport);
|
||||
final TransportService transportService =
|
||||
new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
|
||||
closeables.push(transportService);
|
||||
final AtomicInteger idGenerator = new AtomicInteger();
|
||||
final int limitPortCounts = randomIntBetween(1, 10);
|
||||
final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(
|
||||
executorService,
|
||||
logger,
|
||||
Collections.singletonList("127.0.0.1"),
|
||||
limitPortCounts,
|
||||
transportService,
|
||||
() -> Integer.toString(idGenerator.incrementAndGet()),
|
||||
TimeValue.timeValueMillis(100));
|
||||
assertThat(discoveryNodes, hasSize(limitPortCounts));
|
||||
final Set<Integer> ports = new HashSet<>();
|
||||
for (final DiscoveryNode discoveryNode : discoveryNodes) {
|
||||
assertTrue(discoveryNode.getAddress().address().getAddress().isLoopbackAddress());
|
||||
ports.add(discoveryNode.getAddress().getPort());
|
||||
}
|
||||
assertThat(ports, equalTo(IntStream.range(9300, 9300 + limitPortCounts).mapToObj(m -> m).collect(Collectors.toSet())));
|
||||
}
|
||||
|
||||
public void testUnknownHost() throws InterruptedException {
|
||||
final Logger logger = mock(Logger.class);
|
||||
final NetworkService networkService = new NetworkService(Settings.EMPTY, Collections.emptyList());
|
||||
final String hostname = randomAsciiOfLength(8);
|
||||
final UnknownHostException unknownHostException = new UnknownHostException(hostname);
|
||||
final Transport transport = new MockTcpTransport(
|
||||
Settings.EMPTY,
|
||||
threadPool,
|
||||
BigArrays.NON_RECYCLING_INSTANCE,
|
||||
new NoneCircuitBreakerService(),
|
||||
new NamedWriteableRegistry(Collections.emptyList()),
|
||||
networkService,
|
||||
Version.CURRENT) {
|
||||
|
||||
@Override
|
||||
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException {
|
||||
throw unknownHostException;
|
||||
}
|
||||
|
||||
};
|
||||
closeables.push(transport);
|
||||
|
||||
final TransportService transportService =
|
||||
new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
|
||||
closeables.push(transportService);
|
||||
final AtomicInteger idGenerator = new AtomicInteger();
|
||||
|
||||
final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(
|
||||
executorService,
|
||||
logger,
|
||||
Arrays.asList(hostname),
|
||||
1,
|
||||
transportService,
|
||||
() -> Integer.toString(idGenerator.incrementAndGet()),
|
||||
TimeValue.timeValueMillis(100)
|
||||
);
|
||||
|
||||
assertThat(discoveryNodes, empty());
|
||||
verify(logger).warn("failed to resolve host [" + hostname + "]", unknownHostException);
|
||||
}
|
||||
|
||||
public void testResolveTimeout() throws InterruptedException {
|
||||
final Logger logger = mock(Logger.class);
|
||||
final NetworkService networkService = new NetworkService(Settings.EMPTY, Collections.emptyList());
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
final Transport transport = new MockTcpTransport(
|
||||
Settings.EMPTY,
|
||||
threadPool,
|
||||
BigArrays.NON_RECYCLING_INSTANCE,
|
||||
new NoneCircuitBreakerService(),
|
||||
new NamedWriteableRegistry(Collections.emptyList()),
|
||||
networkService,
|
||||
Version.CURRENT) {
|
||||
|
||||
@Override
|
||||
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException {
|
||||
if ("hostname1".equals(address)) {
|
||||
return new TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)};
|
||||
} else if ("hostname2".equals(address)) {
|
||||
try {
|
||||
latch.await();
|
||||
return new TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)};
|
||||
} catch (InterruptedException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
} else {
|
||||
throw new UnknownHostException(address);
|
||||
}
|
||||
}
|
||||
|
||||
};
|
||||
closeables.push(transport);
|
||||
|
||||
final TransportService transportService =
|
||||
new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
|
||||
closeables.push(transportService);
|
||||
final AtomicInteger idGenerator = new AtomicInteger();
|
||||
final TimeValue resolveTimeout = TimeValue.timeValueMillis(randomIntBetween(100, 200));
|
||||
try {
|
||||
final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(
|
||||
executorService,
|
||||
logger,
|
||||
Arrays.asList("hostname1", "hostname2"),
|
||||
1,
|
||||
transportService,
|
||||
() -> Integer.toString(idGenerator.incrementAndGet()),
|
||||
resolveTimeout);
|
||||
|
||||
assertThat(discoveryNodes, hasSize(1));
|
||||
verify(logger).trace(
|
||||
"resolved host [{}] to {}", "hostname1",
|
||||
new TransportAddress[]{new TransportAddress(TransportAddress.META_ADDRESS, 9300)});
|
||||
verify(logger).warn("timed out after [{}] resolving host [{}]", resolveTimeout, "hostname2");
|
||||
verifyNoMoreInteractions(logger);
|
||||
} finally {
|
||||
latch.countDown();
|
||||
}
|
||||
}
|
||||
|
||||
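The resolve-timeout behaviour asserted above (one hostname resolves, the other hangs past the deadline and is skipped) boils down to submitting each resolution to an executor and bounding the wait on its Future. A plain-JDK sketch of that idea, not the UnicastZenPing implementation itself; the host names and the 100 ms budget are illustrative:

import java.net.InetAddress;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedResolveSketch {
    public static void main(String[] args) throws InterruptedException {
        final List<String> hosts = Arrays.asList("localhost", "host.invalid");
        final ExecutorService executor = Executors.newFixedThreadPool(2);
        try {
            for (final String host : hosts) {
                // resolve on a worker thread so a slow DNS answer cannot block the caller
                final Future<InetAddress[]> future = executor.submit(() -> InetAddress.getAllByName(host));
                try {
                    final InetAddress[] resolved = future.get(100, TimeUnit.MILLISECONDS);
                    System.out.println("resolved " + host + " to " + Arrays.toString(resolved));
                } catch (TimeoutException e) {
                    System.out.println("timed out resolving " + host);
                    future.cancel(true);
                } catch (ExecutionException e) {
                    System.out.println("failed to resolve " + host + ": " + e.getCause());
                }
            }
        } finally {
            executor.shutdownNow();
        }
    }
}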
public void testInvalidHosts() throws InterruptedException {
|
||||
final Logger logger = mock(Logger.class);
|
||||
final NetworkService networkService = new NetworkService(Settings.EMPTY, Collections.emptyList());
|
||||
final Transport transport = new MockTcpTransport(
|
||||
Settings.EMPTY,
|
||||
threadPool,
|
||||
BigArrays.NON_RECYCLING_INSTANCE,
|
||||
new NoneCircuitBreakerService(),
|
||||
new NamedWriteableRegistry(Collections.emptyList()),
|
||||
networkService,
|
||||
Version.CURRENT);
|
||||
closeables.push(transport);
|
||||
|
||||
final TransportService transportService =
|
||||
new TransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
|
||||
closeables.push(transportService);
|
||||
final AtomicInteger idGenerator = new AtomicInteger();
|
||||
final List<DiscoveryNode> discoveryNodes = UnicastZenPing.resolveDiscoveryNodes(
|
||||
executorService,
|
||||
logger,
|
||||
Arrays.asList("127.0.0.1:9300:9300", "127.0.0.1:9301"),
|
||||
1,
|
||||
transportService,
|
||||
() -> Integer.toString(idGenerator.incrementAndGet()),
|
||||
TimeValue.timeValueMillis(100));
|
||||
assertThat(discoveryNodes, hasSize(1)); // only one of the two is valid and will be used
|
||||
assertThat(discoveryNodes.get(0).getAddress().getAddress(), equalTo("127.0.0.1"));
|
||||
assertThat(discoveryNodes.get(0).getAddress().getPort(), equalTo(9301));
|
||||
verify(logger).warn(eq("failed to resolve host [127.0.0.1:9300:9300]"), Matchers.any(ExecutionException.class));
|
||||
}
|
||||
|
||||
// assert that we tried to ping each of the configured nodes at least once
|
||||
@ -197,16 +546,20 @@ public class UnicastZenPingTests extends ESTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
private NetworkHandle startServices(Settings settings, ThreadPool threadPool, NetworkService networkService, String nodeId,
|
||||
Version version) {
|
||||
MockTcpTransport transport = new MockTcpTransport(settings, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
|
||||
new NoneCircuitBreakerService(), new NamedWriteableRegistry(Collections.emptyList()), networkService, version);
|
||||
final TransportService transportService = new TransportService(settings, transport, threadPool,
|
||||
TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
|
||||
private NetworkHandle startServices(
|
||||
final Settings settings,
|
||||
final ThreadPool threadPool,
|
||||
final String nodeId,
|
||||
final Version version,
|
||||
final BiFunction<Settings, Version, Transport> supplier) {
|
||||
final Transport transport = supplier.apply(settings, version);
|
||||
final TransportService transportService =
|
||||
new TransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null);
|
||||
transportService.start();
|
||||
transportService.acceptIncomingRequests();
|
||||
ConcurrentMap<TransportAddress, AtomicInteger> counters = ConcurrentCollections.newConcurrentMap();
|
||||
final ConcurrentMap<TransportAddress, AtomicInteger> counters = ConcurrentCollections.newConcurrentMap();
|
||||
transportService.addConnectionListener(new TransportConnectionListener() {
|
||||
|
||||
@Override
|
||||
public void onNodeConnected(DiscoveryNode node) {
|
||||
counters.computeIfAbsent(node.getAddress(), k -> new AtomicInteger());
|
||||
@ -216,25 +569,32 @@ public class UnicastZenPingTests extends ESTestCase {
|
||||
@Override
|
||||
public void onNodeDisconnected(DiscoveryNode node) {
|
||||
}
|
||||
|
||||
});
|
||||
final DiscoveryNode node = new DiscoveryNode(nodeId, transportService.boundAddress().publishAddress(), emptyMap(), emptySet(),
|
||||
version);
|
||||
final DiscoveryNode node =
|
||||
new DiscoveryNode(nodeId, transportService.boundAddress().publishAddress(), emptyMap(), emptySet(), version);
|
||||
transportService.setLocalNode(node);
|
||||
return new NetworkHandle((TransportAddress)transport.boundAddress().publishAddress(), transportService, node, counters);
|
||||
return new NetworkHandle(transport.boundAddress().publishAddress(), transportService, node, counters);
|
||||
}
|
||||
|
||||
private static class NetworkHandle {
|
||||
|
||||
public final TransportAddress address;
|
||||
public final TransportService transportService;
|
||||
public final DiscoveryNode node;
|
||||
public final ConcurrentMap<TransportAddress, AtomicInteger> counters;
|
||||
|
||||
public NetworkHandle(TransportAddress address, TransportService transportService, DiscoveryNode discoveryNode,
|
||||
ConcurrentMap<TransportAddress, AtomicInteger> counters) {
|
||||
public NetworkHandle(
|
||||
final TransportAddress address,
|
||||
final TransportService transportService,
|
||||
final DiscoveryNode discoveryNode,
|
||||
final ConcurrentMap<TransportAddress, AtomicInteger> counters) {
|
||||
this.address = address;
|
||||
this.transportService = transportService;
|
||||
this.node = discoveryNode;
|
||||
this.counters = counters;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -46,6 +46,7 @@ import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.env.ShardLockObtainFailedException;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.shard.ShardStateMetaData;
|
||||
import org.elasticsearch.snapshots.Snapshot;
|
||||
@ -174,6 +175,69 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
|
||||
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests that when the node returns a ShardLockObtainFailedException, it will be considered as a valid shard copy
|
||||
*/
|
||||
public void testShardLockObtainFailedException() {
|
||||
final RoutingAllocation allocation;
|
||||
boolean useAllocationIds = randomBoolean();
|
||||
if (useAllocationIds) {
|
||||
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED,
|
||||
randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
|
||||
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean(),
|
||||
new ShardLockObtainFailedException(shardId, "test"));
|
||||
} else {
|
||||
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_1_1);
|
||||
testAllocator.addData(node1, 3, null, randomBoolean(), new ShardLockObtainFailedException(shardId, "test"));
|
||||
}
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodesChanged(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.getId()));
|
||||
if (useAllocationIds) {
|
||||
// check that allocation id is reused
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo("allocId1"));
|
||||
}
|
||||
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests that when one node returns a ShardLockObtainFailedException and another properly loads the store, it will
|
||||
* select the second node as target
|
||||
*/
|
||||
public void testShardLockObtainFailedExceptionPreferOtherValidCopies() {
|
||||
final RoutingAllocation allocation;
|
||||
boolean useAllocationIds = randomBoolean();
|
||||
String allocId1 = randomAsciiOfLength(10);
|
||||
String allocId2 = randomAsciiOfLength(10);
|
||||
if (useAllocationIds) {
|
||||
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED,
|
||||
randomFrom(Version.V_2_0_0, Version.CURRENT), allocId1, allocId2);
|
||||
testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, allocId1, randomBoolean(),
|
||||
new ShardLockObtainFailedException(shardId, "test"));
|
||||
testAllocator.addData(node2, ShardStateMetaData.NO_VERSION, allocId2, randomBoolean(), null);
|
||||
} else {
|
||||
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, Version.V_2_1_1);
|
||||
testAllocator.addData(node1, 3, null, randomBoolean(), new ShardLockObtainFailedException(shardId, "test"));
|
||||
if (randomBoolean()) {
|
||||
testAllocator.addData(node2, randomIntBetween(2, 4), null, randomBoolean(), null);
|
||||
} else {
|
||||
testAllocator.addData(node2, ShardStateMetaData.NO_VERSION, "some alloc id", randomBoolean(), null);
|
||||
}
|
||||
}
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodesChanged(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.getId()));
|
||||
if (useAllocationIds) {
|
||||
// check that allocation id is reused
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo(allocId2));
|
||||
}
|
||||
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests that when there is a node to allocate the shard to, it will be allocated to it.
|
||||
*/
|
||||
|
@ -26,6 +26,8 @@ import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.core.LogEvent;
|
||||
import org.apache.logging.log4j.core.appender.AbstractAppender;
|
||||
import org.apache.logging.log4j.core.filter.RegexFilter;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.lucene.codecs.Codec;
|
||||
import org.apache.lucene.document.Field;
|
||||
@ -125,6 +127,7 @@ import org.hamcrest.MatcherAssert;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOError;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.charset.Charset;
|
||||
@ -229,8 +232,12 @@ public class InternalEngineTests extends ESTestCase {
|
||||
}
|
||||
|
||||
public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode) {
|
||||
return copy(config, openMode, config.getAnalyzer());
|
||||
}
|
||||
|
||||
public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, Analyzer analyzer) {
|
||||
return new EngineConfig(openMode, config.getShardId(), config.getThreadPool(), config.getIndexSettings(), config.getWarmer(),
|
||||
config.getStore(), config.getDeletionPolicy(), config.getMergePolicy(), config.getAnalyzer(), config.getSimilarity(),
|
||||
config.getStore(), config.getDeletionPolicy(), config.getMergePolicy(), analyzer, config.getSimilarity(),
|
||||
new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), config.getQueryCache(),
|
||||
config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(),
|
||||
config.getMaxUnsafeAutoIdTimestamp());
|
||||
@ -2849,4 +2856,38 @@ public class InternalEngineTests extends ESTestCase {
|
||||
assertTrue(internalEngine.failedEngine.get() instanceof MockDirectoryWrapper.FakeIOException);
|
||||
}
|
||||
}
|
||||
|
||||
public void testTragicEventErrorBubblesUp() throws IOException {
|
||||
engine.close();
|
||||
final AtomicBoolean failWithFatalError = new AtomicBoolean(true);
|
||||
final VirtualMachineError error = randomFrom(
|
||||
new InternalError(),
|
||||
new OutOfMemoryError(),
|
||||
new StackOverflowError(),
|
||||
new UnknownError());
|
||||
engine = new InternalEngine(copy(engine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, new Analyzer() {
|
||||
@Override
|
||||
protected TokenStreamComponents createComponents(String fieldName) {
|
||||
return new TokenStreamComponents(new Tokenizer() {
|
||||
@Override
|
||||
public boolean incrementToken() throws IOException {
|
||||
if (failWithFatalError.get()) {
|
||||
throw error;
|
||||
} else {
|
||||
throw new AssertionError("should not get to this point");
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}));
|
||||
final Document document = testDocument();
|
||||
document.add(new TextField("value", "test", Field.Store.YES));
|
||||
final ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
|
||||
final Engine.Index first = new Engine.Index(newUid("1"), doc);
|
||||
expectThrows(error.getClass(), () -> engine.index(first));
|
||||
failWithFatalError.set(false);
|
||||
expectThrows(error.getClass(), () -> engine.index(first));
|
||||
assertNull(engine.failedEngine.get());
|
||||
}
|
||||
|
||||
}
|
||||
|
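The tragic-event test above uses expectThrows to assert that a VirtualMachineError escapes engine.index(...) without being swallowed. For readers who have not seen that helper, a minimal stand-in written in plain Java shows the shape of the contract; this is an illustrative sketch, not the test framework's actual implementation:

public class ExpectThrowsSketch {

    @FunctionalInterface
    interface ThrowingRunnable {
        void run() throws Throwable;
    }

    // Runs the runnable and returns the throwable only if it is of the expected type.
    static <T extends Throwable> T expectThrows(Class<T> expected, ThrowingRunnable runnable) {
        try {
            runnable.run();
        } catch (Throwable t) {
            if (expected.isInstance(t)) {
                return expected.cast(t);
            }
            throw new AssertionError("unexpected throwable " + t, t);
        }
        throw new AssertionError("expected " + expected.getName() + " but nothing was thrown");
    }

    public static void main(String[] args) {
        final OutOfMemoryError error = expectThrows(OutOfMemoryError.class, () -> {
            throw new OutOfMemoryError("simulated");
        });
        System.out.println("caught as expected: " + error.getMessage());
    }
}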
@@ -27,9 +27,6 @@ import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.MapperTestUtils;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;

@@ -92,7 +89,7 @@ public class MultiFieldCopyToMapperTests extends ESTestCase {
private static Tuple<List<Version>, List<Version>> versionsWithAndWithoutExpectedExceptions() {
List<Version> versionsWithException = new ArrayList<>();
List<Version> versionsWithoutException = new ArrayList<>();
for (Version version : VersionUtils.allVersions()) {
for (Version version : VersionUtils.allReleasedVersions()) {
if (version.after(Version.V_2_1_0) ||
(version.after(Version.V_2_0_1) && version.before(Version.V_2_1_0))) {
versionsWithException.add(version);

@@ -297,7 +297,7 @@ public class QueryDSLDocumentationTests extends ESTestCase {
parameters.put("param1", 5);
scriptQuery(
new Script(
ScriptType.FILE, "groovy", "mygroovyscript",
ScriptType.FILE, "coollang", "myscript",
parameters)
);

@ -1,64 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.query;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
|
||||
import org.elasticsearch.script.ScriptSettings;
|
||||
import org.elasticsearch.search.SearchModule;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
|
||||
public class QueryRewriteContextTests extends ESTestCase {
|
||||
|
||||
public void testNewParseContextWithLegacyScriptLanguage() throws Exception {
|
||||
String defaultLegacyScriptLanguage = randomAsciiOfLength(4);
|
||||
IndexMetaData.Builder indexMetadata = new IndexMetaData.Builder("index");
|
||||
indexMetadata.settings(Settings.builder().put("index.version.created", Version.CURRENT)
|
||||
.put("index.number_of_shards", 1)
|
||||
.put("index.number_of_replicas", 1)
|
||||
);
|
||||
final long nowInMills = randomPositiveLong();
|
||||
IndicesQueriesRegistry indicesQueriesRegistry = new SearchModule(Settings.EMPTY, false, emptyList()).getQueryParserRegistry();
|
||||
IndexSettings indexSettings = new IndexSettings(indexMetadata.build(),
|
||||
Settings.builder().put(ScriptSettings.LEGACY_SCRIPT_SETTING, defaultLegacyScriptLanguage).build());
|
||||
QueryRewriteContext queryRewriteContext =
|
||||
new QueryRewriteContext(indexSettings, null, null, indicesQueriesRegistry, null, null, null, () -> nowInMills);
|
||||
|
||||
// verify that the default script language in the query parse context is equal to defaultLegacyScriptLanguage variable:
|
||||
QueryParseContext queryParseContext =
|
||||
queryRewriteContext.newParseContextWithLegacyScriptLanguage(XContentHelper.createParser(new BytesArray("{}")));
|
||||
assertEquals(defaultLegacyScriptLanguage, queryParseContext.getDefaultScriptLanguage());
|
||||
|
||||
// verify that the script query's script language is equal to defaultLegacyScriptLanguage variable:
|
||||
XContentParser parser = XContentHelper.createParser(new BytesArray("{\"script\" : {\"script\": \"return true\"}}"));
|
||||
queryParseContext = queryRewriteContext.newParseContextWithLegacyScriptLanguage(parser);
|
||||
ScriptQueryBuilder queryBuilder = (ScriptQueryBuilder) queryParseContext.parseInnerQueryBuilder().get();
|
||||
assertEquals(defaultLegacyScriptLanguage, queryBuilder.script().getLang());
|
||||
}
|
||||
|
||||
}
|
@ -45,21 +45,8 @@ public class PluginsServiceTests extends ESTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
public static class FailOnModule extends Plugin {
|
||||
public void onModule(BrokenModule brokenModule) {
|
||||
throw new IllegalStateException("boom");
|
||||
}
|
||||
}
|
||||
|
||||
public static class FilterablePlugin extends Plugin implements ScriptPlugin {}
|
||||
|
||||
public static class BrokenModule extends AbstractModule {
|
||||
|
||||
@Override
|
||||
protected void configure() {
|
||||
}
|
||||
}
|
||||
|
||||
static PluginsService newPluginsService(Settings settings, Class<? extends Plugin>... classpathPlugins) {
|
||||
return new PluginsService(settings, null, new Environment(settings).pluginsFile(), Arrays.asList(classpathPlugins));
|
||||
}
|
||||
@ -91,19 +78,6 @@ public class PluginsServiceTests extends ESTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
public void testOnModuleExceptionsArePropagated() {
|
||||
Settings settings = Settings.builder()
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build();
|
||||
PluginsService service = newPluginsService(settings, FailOnModule.class);
|
||||
try {
|
||||
service.processModule(new BrokenModule());
|
||||
fail("boom");
|
||||
} catch (ElasticsearchException ex) {
|
||||
assertEquals("failed to invoke onModule", ex.getMessage());
|
||||
assertEquals("boom", ex.getCause().getCause().getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void testExistingPluginMissingDescriptor() throws Exception {
|
||||
Path pluginsDir = createTempDir();
|
||||
Files.createDirectory(pluginsDir.resolve("plugin-missing-descriptor"));
|
||||
|
@@ -221,7 +221,7 @@ public class ScriptServiceTests extends ESTestCase {
builder.put("script.file", "true");
}
buildScriptService(builder.build());
createFileScripts("groovy", "mustache", "dtest");
createFileScripts("mustache", "dtest");

for (ScriptContext scriptContext : scriptContexts) {
// only file scripts are accepted by default
@@ -292,7 +292,7 @@ public class ScriptServiceTests extends ESTestCase {
}

buildScriptService(builder.build());
createFileScripts("groovy", "expression", "mustache", "dtest");
createFileScripts("expression", "mustache", "dtest");

for (ScriptType scriptType : ScriptType.values()) {
//make sure file scripts have a different name than inline ones.

@ -34,39 +34,6 @@ import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public class ScriptSettingsTests extends ESTestCase {
|
||||
|
||||
public void testDefaultLegacyLanguageIsPainless() {
|
||||
ScriptEngineRegistry scriptEngineRegistry =
|
||||
new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService()));
|
||||
ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
|
||||
ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
|
||||
assertThat(scriptSettings.getDefaultLegacyScriptLanguageSetting().get(Settings.EMPTY),
|
||||
equalTo(ScriptSettings.LEGACY_DEFAULT_LANG));
|
||||
}
|
||||
|
||||
public void testCustomLegacyDefaultLanguage() {
|
||||
ScriptEngineRegistry scriptEngineRegistry =
|
||||
new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService()));
|
||||
ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
|
||||
ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
|
||||
String defaultLanguage = CustomScriptEngineService.NAME;
|
||||
Settings settings = Settings.builder().put(ScriptSettings.LEGACY_SCRIPT_SETTING, defaultLanguage).build();
|
||||
assertThat(scriptSettings.getDefaultLegacyScriptLanguageSetting().get(settings), equalTo(defaultLanguage));
|
||||
}
|
||||
|
||||
public void testInvalidLegacyDefaultLanguage() {
|
||||
ScriptEngineRegistry scriptEngineRegistry =
|
||||
new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService()));
|
||||
ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
|
||||
ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
|
||||
Settings settings = Settings.builder().put(ScriptSettings.LEGACY_SCRIPT_SETTING, "C++").build();
|
||||
try {
|
||||
scriptSettings.getDefaultLegacyScriptLanguageSetting().get(settings);
|
||||
fail("should have seen unregistered default language");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertThat(e.getMessage(), containsString("unregistered default language [C++]"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testSettingsAreProperlyPropogated() {
|
||||
ScriptEngineRegistry scriptEngineRegistry =
|
||||
new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngineService()));
|
||||
|
@ -20,14 +20,9 @@
|
||||
package org.elasticsearch.search.internal;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ShardValidateQueryRequestTests;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.cluster.metadata.AliasMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.cluster.routing.TestShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.Strings;
|
||||
@ -93,8 +88,6 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
|
||||
private ShardSearchTransportRequest createShardSearchTransportRequest() throws IOException {
|
||||
SearchRequest searchRequest = createSearchRequest();
|
||||
ShardId shardId = new ShardId(randomAsciiOfLengthBetween(2, 10), randomAsciiOfLengthBetween(2, 10), randomInt());
|
||||
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, null, null, randomBoolean(), ShardRoutingState.UNASSIGNED,
|
||||
new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "reason"));
|
||||
final AliasFilter filteringAliases;
|
||||
if (randomBoolean()) {
|
||||
String[] strings = generateRandomStringArray(10, 10, false, false);
|
||||
@ -102,7 +95,7 @@ public class ShardSearchTransportRequestTests extends AbstractSearchTestCase {
|
||||
} else {
|
||||
filteringAliases = new AliasFilter(null, Strings.EMPTY_ARRAY);
|
||||
}
|
||||
return new ShardSearchTransportRequest(searchRequest, shardRouting,
|
||||
return new ShardSearchTransportRequest(searchRequest, shardId,
|
||||
randomIntBetween(1, 100), filteringAliases, Math.abs(randomLong()));
|
||||
}
|
||||
|
||||
|
@ -21,6 +21,8 @@ package org.elasticsearch.search.scriptfilter;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.plugins.Plugin;
@ -30,6 +32,8 @@ import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;

import java.io.IOException;
import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@ -37,11 +41,13 @@ import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;

import static java.util.Collections.emptyMap;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.scriptQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;

@ESIntegTestCase.ClusterScope(scope= ESIntegTestCase.Scope.SUITE)
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE)
public class ScriptQuerySearchIT extends ESIntegTestCase {

@Override
@ -74,6 +80,16 @@ public class ScriptQuerySearchIT extends ESIntegTestCase {
return num1.getValue() > param1;
});

scripts.put("doc['binaryData'].get(0).length", vars -> {
Map<?, ?> doc = (Map) vars.get("doc");
return ((ScriptDocValues.BytesRefs) doc.get("binaryData")).get(0).length;
});

scripts.put("doc['binaryData'].get(0).length > 15", vars -> {
Map<?, ?> doc = (Map) vars.get("doc");
return ((ScriptDocValues.BytesRefs) doc.get("binaryData")).get(0).length > 15;
});

return scripts;
}
}
@ -87,6 +103,57 @@ public class ScriptQuerySearchIT extends ESIntegTestCase {
.build();
}

public void testCustomScriptBinaryField() throws Exception {
final byte[] randomBytesDoc1 = getRandomBytes(15);
final byte[] randomBytesDoc2 = getRandomBytes(16);

assertAcked(
client().admin().indices().prepareCreate("my-index")
.addMapping("my-type", createMappingSource("binary"))
.setSettings(indexSettings())
);
client().prepareIndex("my-index", "my-type", "1")
.setSource(jsonBuilder().startObject().field("binaryData",
Base64.getEncoder().encodeToString(randomBytesDoc1)).endObject())
.get();
flush();
client().prepareIndex("my-index", "my-type", "2")
.setSource(jsonBuilder().startObject().field("binaryData",
Base64.getEncoder().encodeToString(randomBytesDoc2)).endObject())
.get();
flush();
refresh();

SearchResponse response = client().prepareSearch()
.setQuery(scriptQuery(
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length > 15", emptyMap())))
.addScriptField("sbinaryData",
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['binaryData'].get(0).length", emptyMap()))
.get();

assertThat(response.getHits().totalHits(), equalTo(1L));
assertThat(response.getHits().getAt(0).id(), equalTo("2"));
assertThat(response.getHits().getAt(0).fields().get("sbinaryData").values().get(0), equalTo(16));

}

private byte[] getRandomBytes(int len) {
final byte[] randomBytes = new byte[len];
random().nextBytes(randomBytes);
return randomBytes;
}

private XContentBuilder createMappingSource(String fieldType) throws IOException {
return XContentFactory.jsonBuilder().startObject().startObject("my-type")
.startObject("properties")
.startObject("binaryData")
.field("type", fieldType)
.field("doc_values", "true")
.endObject()
.endObject()
.endObject().endObject();
}

public void testCustomScriptBoost() throws Exception {
createIndex("test");
client().prepareIndex("test", "type1", "1")
@ -105,10 +172,10 @@ public class ScriptQuerySearchIT extends ESIntegTestCase {
logger.info("running doc['num1'].value > 1");
SearchResponse response = client().prepareSearch()
.setQuery(scriptQuery(
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap())))
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > 1", Collections.emptyMap())))
.addSort("num1", SortOrder.ASC)
.addScriptField("sNum1",
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()))
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()))
.get();

assertThat(response.getHits().totalHits(), equalTo(2L));
@ -126,7 +193,7 @@ public class ScriptQuerySearchIT extends ESIntegTestCase {
.setQuery(scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)))
.addSort("num1", SortOrder.ASC)
.addScriptField("sNum1",
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()))
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()))
.get();

assertThat(response.getHits().totalHits(), equalTo(1L));
@ -141,7 +208,7 @@ public class ScriptQuerySearchIT extends ESIntegTestCase {
.setQuery(scriptQuery(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value > param1", params)))
.addSort("num1", SortOrder.ASC)
.addScriptField("sNum1",
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()))
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "doc['num1'].value", Collections.emptyMap()))
.get();

assertThat(response.getHits().totalHits(), equalTo(3L));
BIN core/src/test/resources/indices/bwc/index-2.4.2.zip (new binary file, content not shown)
BIN core/src/test/resources/indices/bwc/repo-2.4.2.zip (new binary file, content not shown)
@ -64,7 +64,7 @@ def main():
if c.version == '1.2.0':
    # 1.2.0 was pulled from download.elasticsearch.org because of routing bug:
    url = 'http://central.maven.org/maven2/org/elasticsearch/elasticsearch/1.2.0/%s' % filename
elif c.version.startswith('0.') or c.version.startswith('1.'):
elif c.version.startswith('0.') or c.version.startswith('1.') or c.version.startswith('2.'):
    url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename
else:
    url = 'https://artifacts.elastic.co/downloads/elasticsearch/%s' % filename
@ -5,6 +5,7 @@
Besides the link:/guide[officially supported Elasticsearch clients], there are
a number of clients that have been contributed by the community for various languages:

* <<b4j>>
* <<clojure>>
* <<cold-fusion>>
* <<erlang>>
@ -25,6 +26,10 @@ a number of clients that have been contributed by the community for various lang
* <<smalltalk>>
* <<vertx>>

[[b4j]]
== B4J
* https://www.b4x.com/android/forum/threads/server-jelasticsearch-search-and-text-analytics.73335/
B4J client based on the official Java REST client.

[[clojure]]
== Clojure
@ -5,21 +5,6 @@ Here is how you can use
{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Scripted Metric Aggregation]
with Java API.

Don't forget to add Groovy in your classpath if you want to run Groovy scripts in an embedded data node
(for unit tests for example).
For example, with Maven, add this dependency to your `pom.xml` file:

[source,xml]
--------------------------------------------------
<dependency>
<groupId>org.codehaus.groovy</groupId>
<artifactId>groovy-all</artifactId>
<version>2.3.2</version>
<classifier>indy</classifier>
</dependency>
--------------------------------------------------


===== Prepare aggregation request

Here is an example on how to create the aggregation request:
@ -10,6 +10,15 @@ Compatible with Elasticsearch 2.x and onwards.

=== Maven Repository

The low-level REST client is subject to the same release cycle as
elasticsearch. Replace the version with the desired sniffer version, first
released with `5.0.0-alpha4`. There is no relation between the sniffer version
and the elasticsearch version that the client can communicate with. Sniffer
supports fetching the nodes list from elasticsearch 2.x and onwards.


==== Maven configuration

Here is how you can configure the dependency using maven as a dependency manager.
Add the following to your `pom.xml` file:

@ -22,11 +31,17 @@ Add the following to your `pom.xml` file:
</dependency>
--------------------------------------------------

The low-level REST client is subject to the same release cycle as
elasticsearch. Replace `${es.version}` with the desired sniffer version, first
released with `5.0.0-alpha4`. There is no relation between the sniffer version
and the elasticsearch version that the client can communicate with. Sniffer
supports fetching the nodes list from elasticsearch 2.x and onwards.
==== Gradle configuration

Here is how you can configure the dependency using gradle as a dependency manager.
Add the following to your `build.gradle` file:

["source","groovy",subs="attributes"]
--------------------------------------------------
dependencies {
compile 'org.elasticsearch.client:sniffer:{version}'
}
--------------------------------------------------

=== Usage

@ -6,6 +6,14 @@ The low-level Java REST client is hosted on
http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.elasticsearch.client%22[Maven
Central]. The minimum Java version required is `1.7`.

The low-level REST client is subject to the same release cycle as
elasticsearch. Replace the version with the desired client version, first
released with `5.0.0-alpha4`. There is no relation between the client version
and the elasticsearch version that the client can communicate with. The
low-level REST client is compatible with all elasticsearch versions.

==== Maven configuration

Here is how you can configure the dependency using maven as a dependency manager.
Add the following to your `pom.xml` file:

@ -18,11 +26,17 @@ Add the following to your `pom.xml` file:
</dependency>
--------------------------------------------------

The low-level REST client is subject to the same release cycle as
elasticsearch. Replace `${es.version}` with the desired client version, first
released with `5.0.0-alpha4`. There is no relation between the client version
and the elasticsearch version that the client can communicate with. The
low-level REST client is compatible with all elasticsearch versions.
==== Gradle configuration

Here is how you can configure the dependency using gradle as a dependency manager.
Add the following to your `build.gradle` file:

["source","groovy",subs="attributes"]
--------------------------------------------------
dependencies {
compile 'org.elasticsearch.client:rest:{version}'
}
--------------------------------------------------

=== Dependencies

@ -140,8 +154,8 @@ void performRequestAsync(String method, String endpoint,
void performRequestAsync(String method, String endpoint,
Map<String, String> params,
HttpEntity entity,
ResponseListener responseListener,
HttpAsyncResponseConsumerFactory responseConsumerFactory,
ResponseListener responseListener,
Header... headers);
--------------------------------------------------

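Not part of the diff: a minimal sketch of calling the reordered async signature shown above, assuming a `RestClient` built against localhost and the `HttpAsyncResponseConsumerFactory.DEFAULT` constant; adjust both to your setup.

["source","java"]
--------------------------------------------------
import java.util.Collections;

import org.apache.http.HttpHost;
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient;

public class AsyncRequestSketch {
    public static void main(String[] args) {
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http")).build();
        // The response consumer factory now comes before the listener.
        restClient.performRequestAsync("GET", "/_cluster/health",
            Collections.<String, String>emptyMap(),
            null,                                      // no request body
            HttpAsyncResponseConsumerFactory.DEFAULT,  // factory first (assumed constant)
            new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    System.out.println(response.getStatusLine());
                }
                @Override
                public void onFailure(Exception exception) {
                    exception.printStackTrace();
                }
            });
    }
}
--------------------------------------------------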
@ -586,12 +586,82 @@ generates an edit distance based on the length of the term. For lengths:
--

[float]
=== Result Casing
[[common-options-error-options]]
=== Enabling stack traces

All REST APIs accept the `case` parameter. When set to `camelCase`, all
field names in the result will be returned in camel casing, otherwise,
underscore casing will be used. Note, this does not apply to the source
document indexed.
By default when a request returns an error Elasticsearch doesn't include the
stack trace of the error. You can enable that behavior by setting the
`error_trace` url parameter to `true`. For example, by default when you send an
invalid `size` parameter to the `_search` API:

[source,js]
----------------------------------------------------------------------
POST /twitter/_search?size=surprise_me
----------------------------------------------------------------------
// CONSOLE
// TEST[catch:request]

The response looks like:

[source,js]
----------------------------------------------------------------------
{
"error" : {
"root_cause" : [
{
"type" : "illegal_argument_exception",
"reason" : "Failed to parse int parameter [size] with value [surprise_me]"
}
],
"type" : "illegal_argument_exception",
"reason" : "Failed to parse int parameter [size] with value [surprise_me]",
"caused_by" : {
"type" : "number_format_exception",
"reason" : "For input string: \"surprise_me\""
}
},
"status" : 400
}
----------------------------------------------------------------------
// TESTRESPONSE

But if you set `error_trace=true`:

[source,js]
----------------------------------------------------------------------
POST /twitter/_search?size=surprise_me&error_trace=true
----------------------------------------------------------------------
// CONSOLE
// TEST[catch:request]

The response looks like:

[source,js]
----------------------------------------------------------------------
{
"error": {
"root_cause": [
{
"type": "illegal_argument_exception",
"reason": "Failed to parse int parameter [size] with value [surprise_me]",
"stack_trace": "Failed to parse int parameter [size] with value [surprise_me]]; nested: IllegalArgumentException..."
}
],
"type": "illegal_argument_exception",
"reason": "Failed to parse int parameter [size] with value [surprise_me]",
"stack_trace": "java.lang.IllegalArgumentException: Failed to parse int parameter [size] with value [surprise_me]\n at org.elasticsearch.rest.RestRequest.paramAsInt(RestRequest.java:175)...",
"caused_by": {
"type": "number_format_exception",
"reason": "For input string: \"surprise_me\"",
"stack_trace": "java.lang.NumberFormatException: For input string: \"surprise_me\"\n at java.lang.NumberFormatException.forInputString(NumberFormatException.java:65)..."
}
},
"status": 400
}
----------------------------------------------------------------------
// TESTRESPONSE[s/"stack_trace": "Failed to parse int parameter.+\.\.\."/"stack_trace": $body.error.root_cause.0.stack_trace/]
// TESTRESPONSE[s/"stack_trace": "java.lang.IllegalArgum.+\.\.\."/"stack_trace": $body.error.stack_trace/]
// TESTRESPONSE[s/"stack_trace": "java.lang.Number.+\.\.\."/"stack_trace": $body.error.caused_by.stack_trace/]

[float]
=== Request body in query string

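As an aside to the new `error_trace` section above (this snippet is not part of the diff): the same parameter can be passed as an ordinary URL parameter from the low-level Java REST client documented earlier in this commit; the index name is illustrative.

["source","java"]
--------------------------------------------------
import java.util.Collections;
import java.util.Map;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ErrorTraceSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // error_trace is a plain URL parameter, so it goes into the params map.
            Map<String, String> params = Collections.singletonMap("error_trace", "true");
            Response response = client.performRequest("GET", "/twitter/_search", params);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------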
@ -66,9 +66,9 @@ these documents. In case a search or bulk request got rejected, `_delete_by_quer
exponential back off). Reaching the maximum retries limit causes the `_delete_by_query`
to abort and all failures are returned in the `failures` of the response.
The deletions that have been performed still stick. In other words, the process
is not rolled back, only aborted. While the first failure causes the abort all
is not rolled back, only aborted. While the first failure causes the abort, all
failures that are returned by the failing bulk request are returned in the `failures`
element so it's possible for there to be quite a few.
element; therefore it's possible for there to be quite a few failed entities.

If you'd like to count version conflicts rather than cause them to abort then
set `conflicts=proceed` on the url or `"conflicts": "proceed"` in the request body.

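Illustration only, not part of the diff: the `conflicts=proceed` option described above sent through the low-level Java REST client; the index name and query body are placeholders.

["source","java"]
--------------------------------------------------
import java.util.Collections;

import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class DeleteByQueryConflictsSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Count version conflicts instead of aborting the whole request.
            NStringEntity body = new NStringEntity(
                "{\"query\":{\"match\":{\"user\":\"kimchy\"}}}", ContentType.APPLICATION_JSON);
            Response response = client.performRequest(
                "POST", "/twitter/_delete_by_query",
                Collections.singletonMap("conflicts", "proceed"), body);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------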
@ -333,7 +333,7 @@ better GET scaling we will have.
You can use the `version` parameter to retrieve the document only if
its current version is equal to the specified one. This behavior is the same
for all version types with the exception of version type `FORCE` which always
retrieves the document.
retrieves the document. Note that `FORCE` version type is deprecated.

Internally, Elasticsearch has marked the old document as deleted and added an
entirely new document. The old version of the document doesn't disappear

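For illustration (not part of the diff): the `version` check described above expressed as a URL parameter via the low-level Java REST client; index, type, id and version number are made up.

["source","java"]
--------------------------------------------------
import java.util.Collections;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;

public class GetWithVersionSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            try {
                // Succeeds only if the document's current version is 2.
                Response response = client.performRequest("GET", "/twitter/tweet/1",
                    Collections.singletonMap("version", "2"));
                System.out.println(response.getStatusLine());
            } catch (ResponseException e) {
                // A mismatch comes back as an error response (version conflict).
                System.out.println(e.getResponse().getStatusLine());
            }
        }
    }
}
--------------------------------------------------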
@ -153,7 +153,6 @@ If everything goes well, you should see a bunch of messages that look like below
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [aggs-matrix-stats]
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [ingest-common]
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-expression]
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-groovy]
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-mustache]
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-painless]
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [percolator]

@ -40,5 +40,5 @@ as follows:

[source,js]
--------------------------------------------------
curl -XGET 'http://localhost:9200/2013-*/_settings/name=index.number_*'
curl -XGET 'http://localhost:9200/2013-*/_settings/index.number_*'
--------------------------------------------------

@ -33,6 +33,7 @@ way to reindex old indices is to use the `reindex` API.
* <<breaking_60_settings_changes>>
* <<breaking_60_plugins_changes>>
* <<breaking_60_indices_changes>>
* <<breaking_60_scripting_changes>>

include::migrate_6_0/cat.asciidoc[]

@ -51,3 +52,5 @@ include::migrate_6_0/settings.asciidoc[]
include::migrate_6_0/plugins.asciidoc[]

include::migrate_6_0/indices.asciidoc[]

include::migrate_6_0/scripting.asciidoc[]
docs/reference/migration/migrate_6_0/scripting.asciidoc (new file, 7 lines)
@ -0,0 +1,7 @@
[[breaking_60_scripting_changes]]
=== Scripting changes

==== Groovy language removed

The groovy scripting language was deprecated in elasticsearch 5.0 and is now removed.
Use painless instead.
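To make the migration note concrete (example added here, not part of the new file): an inline script can target the `painless` language using the same `Script` constructor that appears in the test changes earlier in this diff; the field name is illustrative.

["source","java"]
--------------------------------------------------
import java.util.Collections;

import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

public class PainlessScriptSketch {
    public static void main(String[] args) {
        // Same constructor shape as in ScriptQuerySearchIT above, but with the
        // built-in "painless" language instead of Groovy.
        Script script = new Script(ScriptType.INLINE, "painless", "doc['num1'].value > 1",
            Collections.<String, Object>emptyMap());
        System.out.println(script.getLang());
    }
}
--------------------------------------------------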
Some files were not shown because too many files have changed in this diff.