Merge branch 'master' into feature/rank-eval

commit 3d6fb4eb0b

build.gradle: 82 lines changed
@@ -62,7 +62,12 @@ configure(subprojects.findAll { it.projectDir.toPath().startsWith(rootPath) }) {
   }
 }
 
-// introspect all versions of ES that may be tested agains for backwards compatibility
+/* Introspect all versions of ES that may be tested agains for backwards
+ * compatibility. It is *super* important that this logic is the same as the
+ * logic in VersionUtils.java, modulo alphas, betas, and rcs which are ignored
+ * in gradle because they don't have any backwards compatibility guarantees
+ * but are not ignored in VersionUtils.java because the tests expect them not
+ * to be. */
 Version currentVersion = Version.fromString(VersionProperties.elasticsearch.minus('-SNAPSHOT'))
 int prevMajor = currentVersion.major - 1
 File versionFile = file('core/src/main/java/org/elasticsearch/Version.java')
@@ -72,13 +77,14 @@ List<Version> versions = []
 int prevMinorIndex = -1 // index in the versions list of the last minor from the prev major
 int lastPrevMinor = -1 // the minor version number from the prev major we most recently seen
 for (String line : versionLines) {
-  Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+)(_UNRELEASED)? .*/
+  /* Note that this skips alphas and betas which is fine because they aren't
+   * compatible with anything. */
+  Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+) .*/
   if (match.matches()) {
     int major = Integer.parseInt(match.group(1))
     int minor = Integer.parseInt(match.group(2))
     int bugfix = Integer.parseInt(match.group(3))
-    boolean unreleased = match.group(4) != null
-    Version foundVersion = new Version(major, minor, bugfix, false, unreleased)
+    Version foundVersion = new Version(major, minor, bugfix, false)
     if (currentVersion != foundVersion) {
       versions.add(foundVersion)
     }
@@ -96,10 +102,13 @@ if (currentVersion.bugfix == 0) {
   // If on a release branch, after the initial release of that branch, the bugfix version will
   // be bumped, and will be != 0. On master and N.x branches, we want to test against the
   // unreleased version of closest branch. So for those cases, the version includes -SNAPSHOT,
-  // and the bwc-zip distribution will checkout and build that version.
+  // and the bwc distribution will checkout and build that version.
   Version last = versions[-1]
-  versions[-1] = new Version(last.major, last.minor, last.bugfix,
-      true, last.unreleased)
+  versions[-1] = new Version(last.major, last.minor, last.bugfix, true)
+  if (last.bugfix == 0) {
+    versions[-2] = new Version(
+        versions[-2].major, versions[-2].minor, versions[-2].bugfix, true)
+  }
 }
 
 // injecting groovy property variables into all projects
@@ -114,6 +123,44 @@ allprojects {
   }
 }
 
+task('verifyVersions') {
+  description 'Verifies that all released versions that are indexed compatible are listed in Version.java.'
+  group 'Verification'
+  enabled = false == gradle.startParameter.isOffline()
+  doLast {
+    // Read the list from maven central
+    Node xml
+    new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
+      xml = new XmlParser().parse(s)
+    }
+    Set<String> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ })
+
+    // Limit the known versions to those that should be index compatible
+    knownVersions = knownVersions.findAll { Integer.parseInt(it.split('\\.')[0]) >= prevMajor }
+
+    /* Limit the listed versions to those that have been marked as released.
+     * Versions not marked as released don't get the same testing and we want
+     * to make sure that we flip all unreleased versions to released as soon
+     * as possible after release. */
+    Set<String> actualVersions = new TreeSet<>(
+        indexCompatVersions
+          .findAll { false == it.snapshot }
+          .collect { it.toString() })
+
+    // TODO this is almost certainly going to fail on 5.4 when we release 5.5.0
+
+    // Finally, compare!
+    if (!knownVersions.equals(actualVersions)) {
+      throw new GradleException("out-of-date versions\nActual :" +
+        actualVersions + "\nExpected:" + knownVersions +
+        "; update Version.java")
+    }
+  }
+}
+task('precommit') {
+  dependsOn(verifyVersions)
+}
+
 subprojects {
   project.afterEvaluate {
     // include license and notice in jars
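If the two sets ever drift apart, the new verifyVersions task fails the build with a message along these lines (the version numbers here are invented purely for illustration and are not part of the patch):

    out-of-date versions
    Actual :[5.3.0, 5.3.1, 5.4.0]
    Expected:[5.3.0, 5.3.1, 5.3.2, 5.4.0]; update Version.java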
@@ -168,12 +215,25 @@ subprojects {
     "org.elasticsearch.plugin:reindex-client:${version}": ':modules:reindex',
     "org.elasticsearch.plugin:lang-mustache-client:${version}": ':modules:lang-mustache',
     "org.elasticsearch.plugin:parent-join-client:${version}": ':modules:parent-join',
+    "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}": ':modules:aggs-matrix-stats',
     "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator',
   ]
-  if (wireCompatVersions[-1].snapshot) {
-    // if the most previous version is a snapshot, we need to connect that version to the
-    // bwc-zip project which will checkout and build that snapshot version
-    ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${wireCompatVersions[-1]}"] = ':distribution:bwc-zip'
+  if (indexCompatVersions[-1].snapshot) {
+    /* The last and second to last versions can be snapshots. Rather than use
+     * snapshots built by CI we connect these versions to projects that build
+     * those those versions from the HEAD of the appropriate branch. */
+    if (indexCompatVersions[-1].bugfix == 0) {
+      ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
+      ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
+      ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
+      ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
+      ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
+      ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
+    } else {
+      ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
+      ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
+      ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
+    }
   }
 }
 project.afterEvaluate {
   configurations.all {
@@ -311,7 +311,13 @@ class BuildPlugin implements Plugin<Project> {
     /**
      * Returns a closure which can be used with a MavenPom for fixing problems with gradle generated poms.
      *
-     * The current fixup is to set compile time deps back to compile from runtime (known issue with maven-publish plugin).
+     * <ul>
+     * <li>Remove transitive dependencies. We currently exclude all artifacts explicitly instead of using wildcards
+     * as Ivy incorrectly translates POMs with * excludes to Ivy XML with * excludes which results in the main artifact
+     * being excluded as well (see https://issues.apache.org/jira/browse/IVY-1531). Note that Gradle 2.14+ automatically
+     * translates non-transitive dependencies to * excludes. We should revisit this when upgrading Gradle.</li>
+     * <li>Set compile time deps back to compile from runtime (known issue with maven-publish plugin)</li>
+     * </ul>
      */
     private static Closure fixupDependencies(Project project) {
         return { XmlProvider xml ->
@@ -321,15 +327,53 @@ class BuildPlugin implements Plugin<Project> {
                 return
             }
 
+            // check each dependency for any transitive deps
+            for (Node depNode : depsNodes.get(0).children()) {
+                String groupId = depNode.get('groupId').get(0).text()
+                String artifactId = depNode.get('artifactId').get(0).text()
+                String version = depNode.get('version').get(0).text()
+
                 // fix deps incorrectly marked as runtime back to compile time deps
                 // see https://discuss.gradle.org/t/maven-publish-plugin-generated-pom-making-dependency-scope-runtime/7494/4
-            for (Node depNode : depsNodes.get(0).children()) {
                 boolean isCompileDep = project.configurations.compile.allDependencies.find { dep ->
                     dep.name == depNode.artifactId.text()
                 }
                 if (depNode.scope.text() == 'runtime' && isCompileDep) {
                     depNode.scope*.value = 'compile'
                 }
+
+                // remove any exclusions added by gradle, they contain wildcards and systems like ivy have bugs with wildcards
+                // see https://github.com/elastic/elasticsearch/issues/24490
+                NodeList exclusionsNode = depNode.get('exclusions')
+                if (exclusionsNode.size() > 0) {
+                    depNode.remove(exclusionsNode.get(0))
+                }
+
+                // collect the transitive deps now that we know what this dependency is
+                String depConfig = transitiveDepConfigName(groupId, artifactId, version)
+                Configuration configuration = project.configurations.findByName(depConfig)
+                if (configuration == null) {
+                    continue // we did not make this dep non-transitive
+                }
+                Set<ResolvedArtifact> artifacts = configuration.resolvedConfiguration.resolvedArtifacts
+                if (artifacts.size() <= 1) {
+                    // this dep has no transitive deps (or the only artifact is itself)
+                    continue
+                }
+
+                // we now know we have something to exclude, so add exclusions for all artifacts except the main one
+                Node exclusions = depNode.appendNode('exclusions')
+                for (ResolvedArtifact artifact : artifacts) {
+                    ModuleVersionIdentifier moduleVersionIdentifier = artifact.moduleVersion.id;
+                    String depGroupId = moduleVersionIdentifier.group
+                    String depArtifactId = moduleVersionIdentifier.name
+                    // add exclusions for all artifacts except the main one
+                    if (depGroupId != groupId || depArtifactId != artifactId) {
+                        Node exclusion = exclusions.appendNode('exclusion')
+                        exclusion.appendNode('groupId', depGroupId)
+                        exclusion.appendNode('artifactId', depArtifactId)
+                    }
+                }
             }
         }
     }
@@ -29,20 +29,14 @@ public class Version {
     final int bugfix
     final int id
     final boolean snapshot
-    /**
-     * Is the vesion listed as {@code _UNRELEASED} in Version.java.
-     */
-    final boolean unreleased
 
-    public Version(int major, int minor, int bugfix, boolean snapshot,
-            boolean unreleased) {
+    public Version(int major, int minor, int bugfix, boolean snapshot) {
         this.major = major
         this.minor = minor
         this.bugfix = bugfix
         this.snapshot = snapshot
         this.id = major * 100000 + minor * 1000 + bugfix * 10 +
             (snapshot ? 1 : 0)
-        this.unreleased = unreleased
     }
 
     public static Version fromString(String s) {
@@ -54,7 +48,7 @@ public class Version {
             bugfix = bugfix.split('-')[0]
         }
         return new Version(parts[0] as int, parts[1] as int, bugfix as int,
-            snapshot, false)
+            snapshot)
     }
 
     @Override
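Taken together, the two Version.groovy hunks drop the unreleased flag: fromString now feeds the four-argument constructor and only the snapshot bit survives into the id. A small worked example of that arithmetic, illustrative only and not part of the patch:

    // Sketch: the id packs major/minor/bugfix plus a snapshot bit.
    Version v = Version.fromString("5.4.0-SNAPSHOT");
    // v.snapshot == true
    // v.id == 5 * 100000 + 4 * 1000 + 0 * 10 + 1, i.e. 504001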
@@ -168,6 +168,8 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
             current.println(" - skip:")
             current.println(" features: ")
             current.println(" - stash_in_key")
+            current.println(" - stash_in_path")
+            current.println(" - stash_path_replace")
             current.println(" - warnings")
         }
         if (test.skipTest) {
@@ -90,6 +90,7 @@ public class SnippetsTask extends DefaultTask {
              * tests cleaner.
              */
            subst = subst.replace('$body', '\\$body')
+           subst = subst.replace('$_path', '\\$_path')
            // \n is a new line....
            subst = subst.replace('\\n', '\n')
            snippet.contents = snippet.contents.replaceAll(
@@ -46,11 +46,11 @@ class ClusterConfiguration {
     int transportPort = 0
 
     /**
-     * An override of the data directory. This may only be used with a single node.
-     * The value is lazily evaluated at runtime as a String path.
+     * An override of the data directory. Input is the node number and output
+     * is the override data directory.
      */
     @Input
-    Object dataDir = null
+    Closure<String> dataDir = null
 
     /** Optional override of the cluster name. */
     @Input
@@ -169,7 +169,7 @@ class ClusterFormationTasks {
 
         if (node.config.plugins.isEmpty() == false) {
             if (node.nodeVersion == VersionProperties.elasticsearch) {
-                setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node)
+                setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node, prefix)
             } else {
                 setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node, prefix)
             }
@@ -184,7 +184,7 @@ class ClusterFormationTasks {
         // install plugins
         for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
             String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
-            setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, plugin.getValue())
+            setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, plugin.getValue(), prefix)
         }
 
         // sets up any extra config files that need to be copied over to the ES instance;
@@ -379,7 +379,7 @@ class ClusterFormationTasks {
      * For each plugin, if the plugin has rest spec apis in its tests, those api files are also copied
      * to the test resources for this project.
      */
-    static Task configureCopyPluginsTask(String name, Project project, Task setup, NodeInfo node) {
+    static Task configureCopyPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) {
         Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)
 
         List<FileCollection> pluginFiles = []
@@ -387,7 +387,7 @@ class ClusterFormationTasks {
 
             Project pluginProject = plugin.getValue()
             verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
-            String configurationName = "_plugin_${pluginProject.path}"
+            String configurationName = "_plugin_${prefix}_${pluginProject.path}"
             Configuration configuration = project.configurations.findByName(configurationName)
             if (configuration == null) {
                 configuration = project.configurations.create(configurationName)
@@ -422,7 +422,7 @@ class ClusterFormationTasks {
         for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
             Project pluginProject = plugin.getValue()
             verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
-            String configurationName = "_plugin_bwc_${pluginProject.path}"
+            String configurationName = "_plugin_bwc_${prefix}_${pluginProject.path}"
             Configuration configuration = project.configurations.findByName(configurationName)
             if (configuration == null) {
                 configuration = project.configurations.create(configurationName)
@@ -457,12 +457,12 @@ class ClusterFormationTasks {
         return installModule
     }
 
-    static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Project plugin) {
+    static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Project plugin, String prefix) {
         final FileCollection pluginZip;
         if (node.nodeVersion != VersionProperties.elasticsearch) {
-            pluginZip = project.configurations.getByName("_plugin_bwc_${plugin.path}")
+            pluginZip = project.configurations.getByName("_plugin_bwc_${prefix}_${plugin.path}")
         } else {
-            pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
+            pluginZip = project.configurations.getByName("_plugin_${prefix}_${plugin.path}")
         }
         // delay reading the file location until execution time by wrapping in a closure within a GString
         Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
@@ -657,11 +657,11 @@ class ClusterFormationTasks {
             standardOutput = new ByteArrayOutputStream()
             doLast {
                 String out = standardOutput.toString()
-                if (out.contains("${pid} org.elasticsearch.bootstrap.Elasticsearch") == false) {
+                if (out.contains("${ext.pid} org.elasticsearch.bootstrap.Elasticsearch") == false) {
                     logger.error('jps -l')
                     logger.error(out)
-                    logger.error("pid file: ${pidFile}")
-                    logger.error("pid: ${pid}")
+                    logger.error("pid file: ${node.pidFile}")
+                    logger.error("pid: ${ext.pid}")
                     throw new GradleException("jps -l did not report any process with org.elasticsearch.bootstrap.Elasticsearch\n" +
                         "Did you run gradle clean? Maybe an old pid file is still lying around.")
                 } else {
@@ -111,10 +111,7 @@ class NodeInfo {
         homeDir = homeDir(baseDir, config.distribution, nodeVersion)
         confDir = confDir(baseDir, config.distribution, nodeVersion)
         if (config.dataDir != null) {
-            if (config.numNodes != 1) {
-                throw new IllegalArgumentException("Cannot set data dir for integ test with more than one node")
-            }
-            dataDir = config.dataDir
+            dataDir = "${config.dataDir(nodeNum)}"
         } else {
             dataDir = new File(homeDir, "data")
         }
@@ -1,6 +1,6 @@
 # When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
 elasticsearch = 6.0.0-alpha2
-lucene = 7.0.0-snapshot-89f6d17
+lucene = 7.0.0-snapshot-a0aef2f
 
 # optional dependencies
 spatial4j = 0.6
@@ -26,11 +26,11 @@ group = 'org.elasticsearch.client'
 dependencies {
   compile "org.elasticsearch:elasticsearch:${version}"
   compile "org.elasticsearch.client:rest:${version}"
+  compile "org.elasticsearch.plugin:parent-join-client:${version}"
+  compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}"
 
   testCompile "org.elasticsearch.client:test:${version}"
   testCompile "org.elasticsearch.test:framework:${version}"
-  // for parent/child testing
-  testCompile "org.elasticsearch.plugin:parent-join-client:${version}"
   testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
   testCompile "junit:junit:${versions.junit}"
  testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
@@ -33,7 +33,9 @@ import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.common.Nullable;
@@ -42,11 +44,13 @@ import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
+import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 
 import java.io.ByteArrayOutputStream;
@@ -59,7 +63,7 @@ import java.util.StringJoiner;
 
 final class Request {
 
-    private static final String DELIMITER = "/";
+    private static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON;
 
     final String method;
     final String endpoint;
@@ -79,6 +83,7 @@ final class Request {
                 "method='" + method + '\'' +
                 ", endpoint='" + endpoint + '\'' +
                 ", params=" + params +
+                ", hasBody=" + (entity != null) +
                 '}';
     }
 
@@ -307,23 +312,48 @@ final class Request {
             xContentType = Requests.INDEX_CONTENT_TYPE;
         }
 
-        BytesRef source = XContentHelper.toXContent(updateRequest, xContentType, false).toBytesRef();
-        HttpEntity entity = new ByteArrayEntity(source.bytes, source.offset, source.length, ContentType.create(xContentType.mediaType()));
+        HttpEntity entity = createEntity(updateRequest, xContentType);
 
         return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), entity);
     }
 
+    static Request search(SearchRequest searchRequest) throws IOException {
+        String endpoint = endpoint(searchRequest.indices(), searchRequest.types(), "_search");
+        Params params = Params.builder();
+        params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true");
+        params.withRouting(searchRequest.routing());
+        params.withPreference(searchRequest.preference());
+        params.withIndicesOptions(searchRequest.indicesOptions());
+        params.putParam("search_type", searchRequest.searchType().name().toLowerCase(Locale.ROOT));
+        if (searchRequest.requestCache() != null) {
+            params.putParam("request_cache", Boolean.toString(searchRequest.requestCache()));
+        }
+        params.putParam("batched_reduce_size", Integer.toString(searchRequest.getBatchedReduceSize()));
+        if (searchRequest.scroll() != null) {
+            params.putParam("scroll", searchRequest.scroll().keepAlive());
+        }
+        HttpEntity entity = null;
+        if (searchRequest.source() != null) {
+            entity = createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE);
+        }
+        return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), entity);
+    }
+
+    private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
+        BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef();
+        return new ByteArrayEntity(source.bytes, source.offset, source.length, ContentType.create(xContentType.mediaType()));
+    }
+
+    static String endpoint(String[] indices, String[] types, String endpoint) {
+        return endpoint(String.join(",", indices), String.join(",", types), endpoint);
+    }
+
     /**
      * Utility method to build request's endpoint.
      */
     static String endpoint(String... parts) {
-        if (parts == null || parts.length == 0) {
-            return DELIMITER;
-        }
-
-        StringJoiner joiner = new StringJoiner(DELIMITER, DELIMITER, "");
+        StringJoiner joiner = new StringJoiner("/", "/", "");
         for (String part : parts) {
-            if (part != null) {
+            if (Strings.hasLength(part)) {
                 joiner.add(part);
             }
         }
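As a rough sketch of what the new converter emits (illustrative only; the index, field, and parameter values below assume this branch's defaults and are not part of the patch):

    // Request.search is package-private, so this would sit in a test under org.elasticsearch.client.
    SearchRequest searchRequest = new SearchRequest("posts");
    searchRequest.source(new SearchSourceBuilder().query(new TermQueryBuilder("user", "kimchy")));
    Request request = Request.search(searchRequest);
    // request.method   -> "GET"
    // request.endpoint -> "/posts/_search"   (empty types are skipped by Strings.hasLength)
    // request.params   -> typed_keys=true, search_type=query_then_fetch, batched_reduce_size=512,
    //                     ignore_unavailable=false, allow_no_indices=true, expand_wildcards=open
    // request.entity   -> the SearchSourceBuilder rendered as JSON by createEntity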
@@ -453,6 +483,26 @@ final class Request {
             return this;
         }
 
+        Params withIndicesOptions (IndicesOptions indicesOptions) {
+            putParam("ignore_unavailable", Boolean.toString(indicesOptions.ignoreUnavailable()));
+            putParam("allow_no_indices", Boolean.toString(indicesOptions.allowNoIndices()));
+            String expandWildcards;
+            if (indicesOptions.expandWildcardsOpen() == false && indicesOptions.expandWildcardsClosed() == false) {
+                expandWildcards = "none";
+            } else {
+                StringJoiner joiner = new StringJoiner(",");
+                if (indicesOptions.expandWildcardsOpen()) {
+                    joiner.add("open");
+                }
+                if (indicesOptions.expandWildcardsClosed()) {
+                    joiner.add("closed");
+                }
+                expandWildcards = joiner.toString();
+            }
+            putParam("expand_wildcards", expandWildcards);
+            return this;
+        }
+
         Map<String, String> getParams() {
             return Collections.unmodifiableMap(params);
         }
@@ -36,22 +36,117 @@ import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.main.MainRequest;
 import org.elasticsearch.action.main.MainResponse;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.common.CheckedFunction;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ContextParser;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder;
+import org.elasticsearch.join.aggregations.ParsedChildren;
 import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.adjacency.ParsedAdjacencyMatrix;
+import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilter;
+import org.elasticsearch.search.aggregations.bucket.filters.FiltersAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.filters.ParsedFilters;
+import org.elasticsearch.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
+import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram;
+import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.missing.ParsedMissing;
+import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.nested.ParsedNested;
+import org.elasticsearch.search.aggregations.bucket.nested.ParsedReverseNested;
+import org.elasticsearch.search.aggregations.bucket.nested.ReverseNestedAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.range.ParsedRange;
+import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.range.date.DateRangeAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.range.date.ParsedDateRange;
+import org.elasticsearch.search.aggregations.bucket.range.geodistance.GeoDistanceAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.range.geodistance.ParsedGeoDistance;
+import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler;
+import org.elasticsearch.search.aggregations.bucket.sampler.ParsedSampler;
+import org.elasticsearch.search.aggregations.bucket.significant.ParsedSignificantLongTerms;
+import org.elasticsearch.search.aggregations.bucket.significant.ParsedSignificantStringTerms;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantLongTerms;
+import org.elasticsearch.search.aggregations.bucket.significant.SignificantStringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedLongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
+import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
+import org.elasticsearch.search.aggregations.matrix.stats.ParsedMatrixStats;
+import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.avg.ParsedAvg;
+import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.cardinality.ParsedCardinality;
+import org.elasticsearch.search.aggregations.metrics.geobounds.GeoBoundsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.geobounds.ParsedGeoBounds;
+import org.elasticsearch.search.aggregations.metrics.geocentroid.GeoCentroidAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.geocentroid.ParsedGeoCentroid;
+import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.max.ParsedMax;
+import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.min.ParsedMin;
+import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentileRanks;
+import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.InternalHDRPercentiles;
+import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.ParsedHDRPercentileRanks;
+import org.elasticsearch.search.aggregations.metrics.percentiles.hdr.ParsedHDRPercentiles;
+import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentileRanks;
+import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.InternalTDigestPercentiles;
+import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.ParsedTDigestPercentileRanks;
+import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.ParsedTDigestPercentiles;
+import org.elasticsearch.search.aggregations.metrics.scripted.ParsedScriptedMetric;
+import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.stats.ParsedStats;
+import org.elasticsearch.search.aggregations.metrics.stats.StatsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStatsAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.stats.extended.ParsedExtendedStats;
+import org.elasticsearch.search.aggregations.metrics.sum.ParsedSum;
+import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ParsedValueCount;
+import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
+import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.ParsedBucketMetricValue;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.ParsedPercentilesBucket;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.percentile.PercentilesBucketPipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.ParsedStatsBucket;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.StatsBucketPipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ExtendedStatsBucketPipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.stats.extended.ParsedExtendedStatsBucket;
+import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.derivative.ParsedDerivative;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestion;
+import org.elasticsearch.search.suggest.term.TermSuggestion;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.function.Function;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 import static java.util.Collections.emptySet;
@@ -82,10 +177,8 @@ public class RestHighLevelClient {
      */
     protected RestHighLevelClient(RestClient restClient, List<NamedXContentRegistry.Entry> namedXContentEntries) {
         this.client = Objects.requireNonNull(restClient);
-        this.registry = new NamedXContentRegistry(Stream.of(
-                getNamedXContents().stream(),
-                namedXContentEntries.stream()
-        ).flatMap(Function.identity()).collect(toList()));
+        this.registry = new NamedXContentRegistry(Stream.of(getDefaultNamedXContents().stream(), namedXContentEntries.stream())
+                .flatMap(Function.identity()).collect(toList()));
     }
 
     /**
@@ -214,6 +307,24 @@ public class RestHighLevelClient {
                 Collections.singleton(404), headers);
     }
 
+    /**
+     * Executes a search using the Search api
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
+     */
+    public SearchResponse search(SearchRequest searchRequest, Header... headers) throws IOException {
+        return performRequestAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously executes a search using the Search api
+     *
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
+     */
+    public void searchAsync(SearchRequest searchRequest, ActionListener<SearchResponse> listener, Header... headers) {
+        performRequestAsyncAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, listener, emptySet(), headers);
+    }
+
     private <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
             CheckedFunction<Req, Request, IOException> requestConverter,
             CheckedFunction<XContentParser, Resp, IOException> entityParser,
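A minimal usage sketch for the two new methods, assuming an already-initialized RestHighLevelClient named client and an index called posts (illustrative, not part of the patch):

    SearchRequest searchRequest = new SearchRequest("posts");
    searchRequest.source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()));

    // Blocking variant: converts via Request.search and parses SearchResponse.fromXContent.
    SearchResponse searchResponse = client.search(searchRequest);

    // Asynchronous variant: same conversion and parsing, result delivered to the listener.
    client.searchAsync(searchRequest, new ActionListener<SearchResponse>() {
        @Override
        public void onResponse(SearchResponse response) { /* inspect hits, aggregations, suggestions */ }

        @Override
        public void onFailure(Exception e) { /* handle the failure */ }
    });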
@@ -365,9 +476,60 @@ public class RestHighLevelClient {
         return response.getStatusLine().getStatusCode() == 200;
     }
 
-    static List<NamedXContentRegistry.Entry> getNamedXContents() {
-        List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>();
-        //namedXContents.add(new NamedXContentRegistry.Entry(Aggregation.class, new ParseField("sterms"), StringTerms::fromXContent));
-        return namedXContents;
+    static List<NamedXContentRegistry.Entry> getDefaultNamedXContents() {
+        Map<String, ContextParser<Object, ? extends Aggregation>> map = new HashMap<>();
+        map.put(CardinalityAggregationBuilder.NAME, (p, c) -> ParsedCardinality.fromXContent(p, (String) c));
+        map.put(InternalHDRPercentiles.NAME, (p, c) -> ParsedHDRPercentiles.fromXContent(p, (String) c));
+        map.put(InternalHDRPercentileRanks.NAME, (p, c) -> ParsedHDRPercentileRanks.fromXContent(p, (String) c));
+        map.put(InternalTDigestPercentiles.NAME, (p, c) -> ParsedTDigestPercentiles.fromXContent(p, (String) c));
+        map.put(InternalTDigestPercentileRanks.NAME, (p, c) -> ParsedTDigestPercentileRanks.fromXContent(p, (String) c));
+        map.put(PercentilesBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedPercentilesBucket.fromXContent(p, (String) c));
+        map.put(MinAggregationBuilder.NAME, (p, c) -> ParsedMin.fromXContent(p, (String) c));
+        map.put(MaxAggregationBuilder.NAME, (p, c) -> ParsedMax.fromXContent(p, (String) c));
+        map.put(SumAggregationBuilder.NAME, (p, c) -> ParsedSum.fromXContent(p, (String) c));
+        map.put(AvgAggregationBuilder.NAME, (p, c) -> ParsedAvg.fromXContent(p, (String) c));
+        map.put(ValueCountAggregationBuilder.NAME, (p, c) -> ParsedValueCount.fromXContent(p, (String) c));
+        map.put(InternalSimpleValue.NAME, (p, c) -> ParsedSimpleValue.fromXContent(p, (String) c));
+        map.put(DerivativePipelineAggregationBuilder.NAME, (p, c) -> ParsedDerivative.fromXContent(p, (String) c));
+        map.put(InternalBucketMetricValue.NAME, (p, c) -> ParsedBucketMetricValue.fromXContent(p, (String) c));
+        map.put(StatsAggregationBuilder.NAME, (p, c) -> ParsedStats.fromXContent(p, (String) c));
+        map.put(StatsBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedStatsBucket.fromXContent(p, (String) c));
+        map.put(ExtendedStatsAggregationBuilder.NAME, (p, c) -> ParsedExtendedStats.fromXContent(p, (String) c));
+        map.put(ExtendedStatsBucketPipelineAggregationBuilder.NAME,
+                (p, c) -> ParsedExtendedStatsBucket.fromXContent(p, (String) c));
+        map.put(GeoBoundsAggregationBuilder.NAME, (p, c) -> ParsedGeoBounds.fromXContent(p, (String) c));
+        map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c));
+        map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c));
+        map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c));
+        map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c));
+        map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c));
+        map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c));
+        map.put(MissingAggregationBuilder.NAME, (p, c) -> ParsedMissing.fromXContent(p, (String) c));
+        map.put(NestedAggregationBuilder.NAME, (p, c) -> ParsedNested.fromXContent(p, (String) c));
+        map.put(ReverseNestedAggregationBuilder.NAME, (p, c) -> ParsedReverseNested.fromXContent(p, (String) c));
+        map.put(GlobalAggregationBuilder.NAME, (p, c) -> ParsedGlobal.fromXContent(p, (String) c));
+        map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c));
+        map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c));
+        map.put(GeoGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c));
+        map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c));
+        map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c));
+        map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c));
+        map.put(FiltersAggregationBuilder.NAME, (p, c) -> ParsedFilters.fromXContent(p, (String) c));
+        map.put(AdjacencyMatrixAggregationBuilder.NAME, (p, c) -> ParsedAdjacencyMatrix.fromXContent(p, (String) c));
+        map.put(SignificantLongTerms.NAME, (p, c) -> ParsedSignificantLongTerms.fromXContent(p, (String) c));
+        map.put(SignificantStringTerms.NAME, (p, c) -> ParsedSignificantStringTerms.fromXContent(p, (String) c));
+        map.put(ScriptedMetricAggregationBuilder.NAME, (p, c) -> ParsedScriptedMetric.fromXContent(p, (String) c));
+        map.put(ChildrenAggregationBuilder.NAME, (p, c) -> ParsedChildren.fromXContent(p, (String) c));
+        map.put(MatrixStatsAggregationBuilder.NAME, (p, c) -> ParsedMatrixStats.fromXContent(p, (String) c));
+        List<NamedXContentRegistry.Entry> entries = map.entrySet().stream()
+                .map(entry -> new NamedXContentRegistry.Entry(Aggregation.class, new ParseField(entry.getKey()), entry.getValue()))
+                .collect(Collectors.toList());
+        entries.add(new NamedXContentRegistry.Entry(Suggest.Suggestion.class, new ParseField(TermSuggestion.NAME),
+                (parser, context) -> TermSuggestion.fromXContent(parser, (String)context)));
+        entries.add(new NamedXContentRegistry.Entry(Suggest.Suggestion.class, new ParseField(PhraseSuggestion.NAME),
+                (parser, context) -> PhraseSuggestion.fromXContent(parser, (String)context)));
+        entries.add(new NamedXContentRegistry.Entry(Suggest.Suggestion.class, new ParseField(CompletionSuggestion.NAME),
+                (parser, context) -> CompletionSuggestion.fromXContent(parser, (String)context)));
+        return entries;
     }
 }
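The registry built here is what lets the client parse aggregations and suggestions out of a raw search response; each entry is keyed by the type name that the typed_keys parameter (set in Request.search above) prefixes onto the aggregation name in the JSON. A short illustration, with the aggregation name chosen purely for the example:

    // With typed_keys=true the response body contains e.g. "sterms#by_user", and the
    // StringTerms.NAME -> ParsedStringTerms entry registered above knows how to parse it.
    SearchRequest searchRequest = new SearchRequest("posts");
    searchRequest.source(new SearchSourceBuilder()
            .aggregation(AggregationBuilders.terms("by_user").field("user")));
    SearchResponse response = client.search(searchRequest);
    Terms byUser = response.getAggregations().get("by_user");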
@@ -55,7 +55,6 @@ import org.elasticsearch.threadpool.ThreadPool;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.Map;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
 import static java.util.Collections.singletonMap;
@@ -63,16 +62,6 @@ import static java.util.Collections.singletonMap;
 public class CrudIT extends ESRestHighLevelClientTestCase {
 
     public void testDelete() throws IOException {
-        {
-            // Testing non existing document
-            String docId = "does_not_exist";
-            DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId);
-            DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
-            assertEquals("index", deleteResponse.getIndex());
-            assertEquals("type", deleteResponse.getType());
-            assertEquals(docId, deleteResponse.getId());
-            assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult());
-        }
         {
             // Testing deletion
             String docId = "id";
@@ -87,6 +76,16 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
             assertEquals(docId, deleteResponse.getId());
             assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
         }
+        {
+            // Testing non existing document
+            String docId = "does_not_exist";
+            DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId);
+            DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync);
+            assertEquals("index", deleteResponse.getIndex());
+            assertEquals("type", deleteResponse.getType());
+            assertEquals(docId, deleteResponse.getId());
+            assertEquals(DocWriteResponse.Result.NOT_FOUND, deleteResponse.getResult());
+        }
         {
             // Testing version conflict
             String docId = "version_conflict";
@@ -21,25 +21,41 @@ package org.elasticsearch.client;
 
 import org.apache.http.HttpEntity;
 import org.apache.http.entity.ByteArrayEntity;
+import org.apache.http.util.EntityUtils;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkShardRequest;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
 import org.elasticsearch.action.support.replication.ReplicationRequest;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.rest.action.search.RestSearchAction;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
+import org.elasticsearch.search.aggregations.support.ValueType;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.collapse.CollapseBuilder;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
+import org.elasticsearch.search.rescore.QueryRescorerBuilder;
+import org.elasticsearch.search.suggest.SuggestBuilder;
+import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
 import org.elasticsearch.test.ESTestCase;
|
import org.elasticsearch.test.ESTestCase;
|
||||||
import org.elasticsearch.test.RandomObjects;
|
import org.elasticsearch.test.RandomObjects;
|
||||||
|
|
||||||
|
@ -48,6 +64,7 @@ import java.io.InputStream;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.Locale;
|
import java.util.Locale;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.StringJoiner;
|
||||||
import java.util.function.Consumer;
|
import java.util.function.Consumer;
|
||||||
import java.util.function.Function;
|
import java.util.function.Function;
|
||||||
|
|
||||||
|
@ -257,9 +274,8 @@ public class RequestTests extends ESTestCase {
|
||||||
assertEquals(method, request.method);
|
assertEquals(method, request.method);
|
||||||
|
|
||||||
HttpEntity entity = request.entity;
|
HttpEntity entity = request.entity;
|
||||||
assertNotNull(entity);
|
|
||||||
assertTrue(entity instanceof ByteArrayEntity);
|
assertTrue(entity instanceof ByteArrayEntity);
|
||||||
|
assertEquals(indexRequest.getContentType().mediaType(), entity.getContentType().getValue());
|
||||||
try (XContentParser parser = createParser(xContentType.xContent(), entity.getContent())) {
|
try (XContentParser parser = createParser(xContentType.xContent(), entity.getContent())) {
|
||||||
assertEquals(nbFields, parser.map().size());
|
assertEquals(nbFields, parser.map().size());
|
||||||
}
|
}
|
||||||
|
@ -353,7 +369,6 @@ public class RequestTests extends ESTestCase {
|
||||||
assertEquals("POST", request.method);
|
assertEquals("POST", request.method);
|
||||||
|
|
||||||
HttpEntity entity = request.entity;
|
HttpEntity entity = request.entity;
|
||||||
assertNotNull(entity);
|
|
||||||
assertTrue(entity instanceof ByteArrayEntity);
|
assertTrue(entity instanceof ByteArrayEntity);
|
||||||
|
|
||||||
UpdateRequest parsedUpdateRequest = new UpdateRequest();
|
UpdateRequest parsedUpdateRequest = new UpdateRequest();
|
||||||
|
@ -470,7 +485,7 @@ public class RequestTests extends ESTestCase {
|
||||||
assertEquals("/_bulk", request.endpoint);
|
assertEquals("/_bulk", request.endpoint);
|
||||||
assertEquals(expectedParams, request.params);
|
assertEquals(expectedParams, request.params);
|
||||||
assertEquals("POST", request.method);
|
assertEquals("POST", request.method);
|
||||||
|
assertEquals(xContentType.mediaType(), request.entity.getContentType().getValue());
|
||||||
byte[] content = new byte[(int) request.entity.getContentLength()];
|
byte[] content = new byte[(int) request.entity.getContentLength()];
|
||||||
try (InputStream inputStream = request.entity.getContent()) {
|
try (InputStream inputStream = request.entity.getContent()) {
|
||||||
Streams.readFully(inputStream, content);
|
Streams.readFully(inputStream, content);
|
||||||
|
@ -584,6 +599,127 @@ public class RequestTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testSearch() throws Exception {
|
||||||
|
SearchRequest searchRequest = new SearchRequest();
|
||||||
|
int numIndices = randomIntBetween(0, 5);
|
||||||
|
String[] indices = new String[numIndices];
|
||||||
|
for (int i = 0; i < numIndices; i++) {
|
||||||
|
indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5);
|
||||||
|
}
|
||||||
|
searchRequest.indices(indices);
|
||||||
|
int numTypes = randomIntBetween(0, 5);
|
||||||
|
String[] types = new String[numTypes];
|
||||||
|
for (int i = 0; i < numTypes; i++) {
|
||||||
|
types[i] = "type-" + randomAlphaOfLengthBetween(2, 5);
|
||||||
|
}
|
||||||
|
searchRequest.types(types);
|
||||||
|
|
||||||
|
Map<String, String> expectedParams = new HashMap<>();
|
||||||
|
expectedParams.put(RestSearchAction.TYPED_KEYS_PARAM, "true");
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchRequest.routing(randomAlphaOfLengthBetween(3, 10));
|
||||||
|
expectedParams.put("routing", searchRequest.routing());
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchRequest.preference(randomAlphaOfLengthBetween(3, 10));
|
||||||
|
expectedParams.put("preference", searchRequest.preference());
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchRequest.searchType(randomFrom(SearchType.values()));
|
||||||
|
}
|
||||||
|
expectedParams.put("search_type", searchRequest.searchType().name().toLowerCase(Locale.ROOT));
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchRequest.requestCache(randomBoolean());
|
||||||
|
expectedParams.put("request_cache", Boolean.toString(searchRequest.requestCache()));
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchRequest.setBatchedReduceSize(randomIntBetween(2, Integer.MAX_VALUE));
|
||||||
|
}
|
||||||
|
expectedParams.put("batched_reduce_size", Integer.toString(searchRequest.getBatchedReduceSize()));
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchRequest.scroll(randomTimeValue());
|
||||||
|
expectedParams.put("scroll", searchRequest.scroll().keepAlive().getStringRep());
|
||||||
|
}
|
||||||
|
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchRequest.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
|
||||||
|
}
|
||||||
|
expectedParams.put("ignore_unavailable", Boolean.toString(searchRequest.indicesOptions().ignoreUnavailable()));
|
||||||
|
expectedParams.put("allow_no_indices", Boolean.toString(searchRequest.indicesOptions().allowNoIndices()));
|
||||||
|
if (searchRequest.indicesOptions().expandWildcardsOpen() && searchRequest.indicesOptions().expandWildcardsClosed()) {
|
||||||
|
expectedParams.put("expand_wildcards", "open,closed");
|
||||||
|
} else if (searchRequest.indicesOptions().expandWildcardsOpen()) {
|
||||||
|
expectedParams.put("expand_wildcards", "open");
|
||||||
|
} else if (searchRequest.indicesOptions().expandWildcardsClosed()) {
|
||||||
|
expectedParams.put("expand_wildcards", "closed");
|
||||||
|
} else {
|
||||||
|
expectedParams.put("expand_wildcards", "none");
|
||||||
|
}
|
||||||
|
|
||||||
|
SearchSourceBuilder searchSourceBuilder = null;
|
||||||
|
if (frequently()) {
|
||||||
|
searchSourceBuilder = new SearchSourceBuilder();
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE));
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.from(randomIntBetween(0, Integer.MAX_VALUE));
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.minScore(randomFloat());
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.explain(randomBoolean());
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.profile(randomBoolean());
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.highlighter(new HighlightBuilder().field(randomAlphaOfLengthBetween(3, 10)));
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.query(new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10)));
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.aggregation(new TermsAggregationBuilder(randomAlphaOfLengthBetween(3, 10), ValueType.STRING)
|
||||||
|
.field(randomAlphaOfLengthBetween(3, 10)));
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion(randomAlphaOfLengthBetween(3, 10),
|
||||||
|
new CompletionSuggestionBuilder(randomAlphaOfLengthBetween(3, 10))));
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.addRescorer(new QueryRescorerBuilder(
|
||||||
|
new TermQueryBuilder(randomAlphaOfLengthBetween(3, 10), randomAlphaOfLengthBetween(3, 10))));
|
||||||
|
}
|
||||||
|
if (randomBoolean()) {
|
||||||
|
searchSourceBuilder.collapse(new CollapseBuilder(randomAlphaOfLengthBetween(3, 10)));
|
||||||
|
}
|
||||||
|
searchRequest.source(searchSourceBuilder);
|
||||||
|
}
|
||||||
|
|
||||||
|
Request request = Request.search(searchRequest);
|
||||||
|
StringJoiner endpoint = new StringJoiner("/", "/", "");
|
||||||
|
String index = String.join(",", indices);
|
||||||
|
if (Strings.hasLength(index)) {
|
||||||
|
endpoint.add(index);
|
||||||
|
}
|
||||||
|
String type = String.join(",", types);
|
||||||
|
if (Strings.hasLength(type)) {
|
||||||
|
endpoint.add(type);
|
||||||
|
}
|
||||||
|
endpoint.add("_search");
|
||||||
|
assertEquals(endpoint.toString(), request.endpoint);
|
||||||
|
assertEquals(expectedParams, request.params);
|
||||||
|
if (searchSourceBuilder == null) {
|
||||||
|
assertNull(request.entity);
|
||||||
|
} else {
|
||||||
|
BytesReference expectedBytes = XContentHelper.toXContent(searchSourceBuilder, XContentType.JSON, false);
|
||||||
|
assertEquals(XContentType.JSON.mediaType(), request.entity.getContentType().getValue());
|
||||||
|
assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(request.entity)));
|
||||||
|
}
|
||||||
|
}
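    // (Editorial note, not part of this diff.) To make the expectations above concrete: with
    // indices {"index-a", "index-b"} and types {"type-x"} (made-up example names), Request.search
    // should produce the endpoint "/index-a,index-b/type-x/_search", always carry
    // "typed_keys=true" in the parameters, and serialize the SearchSourceBuilder, when one is
    // set, as a JSON request body.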
|
||||||
|
|
||||||
public void testParams() {
|
public void testParams() {
|
||||||
final int nbParams = randomIntBetween(0, 10);
|
final int nbParams = randomIntBetween(0, 10);
|
||||||
Request.Params params = Request.Params.builder();
|
Request.Params params = Request.Params.builder();
|
||||||
|
|
|
@ -20,7 +20,6 @@
|
||||||
package org.elasticsearch.client;
|
package org.elasticsearch.client;
|
||||||
|
|
||||||
import com.fasterxml.jackson.core.JsonParseException;
|
import com.fasterxml.jackson.core.JsonParseException;
|
||||||
|
|
||||||
import org.apache.http.Header;
|
import org.apache.http.Header;
|
||||||
import org.apache.http.HttpEntity;
|
import org.apache.http.HttpEntity;
|
||||||
import org.apache.http.HttpHost;
|
import org.apache.http.HttpHost;
|
||||||
|
@ -51,6 +50,8 @@ import org.elasticsearch.common.xcontent.XContentType;
|
||||||
import org.elasticsearch.common.xcontent.cbor.CborXContent;
|
import org.elasticsearch.common.xcontent.cbor.CborXContent;
|
||||||
import org.elasticsearch.common.xcontent.smile.SmileXContent;
|
import org.elasticsearch.common.xcontent.smile.SmileXContent;
|
||||||
import org.elasticsearch.rest.RestStatus;
|
import org.elasticsearch.rest.RestStatus;
|
||||||
|
import org.elasticsearch.search.aggregations.Aggregation;
|
||||||
|
import org.elasticsearch.search.suggest.Suggest;
|
||||||
import org.elasticsearch.test.ESTestCase;
|
import org.elasticsearch.test.ESTestCase;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.mockito.ArgumentMatcher;
|
import org.mockito.ArgumentMatcher;
|
||||||
|
@ -61,7 +62,9 @@ import org.mockito.internal.matchers.VarargMatcher;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.net.SocketTimeoutException;
|
import java.net.SocketTimeoutException;
|
||||||
import java.util.Collections;
|
import java.util.Collections;
|
||||||
|
import java.util.HashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
import java.util.concurrent.atomic.AtomicInteger;
|
import java.util.concurrent.atomic.AtomicInteger;
|
||||||
import java.util.concurrent.atomic.AtomicReference;
|
import java.util.concurrent.atomic.AtomicReference;
|
||||||
|
|
||||||
|
@ -566,8 +569,18 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testNamedXContents() {
|
public void testNamedXContents() {
|
||||||
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getNamedXContents();
|
List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getDefaultNamedXContents();
|
||||||
assertEquals(0, namedXContents.size());
|
assertEquals(45, namedXContents.size());
|
||||||
|
Map<Class<?>, Integer> categories = new HashMap<>();
|
||||||
|
for (NamedXContentRegistry.Entry namedXContent : namedXContents) {
|
||||||
|
Integer counter = categories.putIfAbsent(namedXContent.categoryClass, 1);
|
||||||
|
if (counter != null) {
|
||||||
|
categories.put(namedXContent.categoryClass, counter + 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
assertEquals(2, categories.size());
|
||||||
|
assertEquals(Integer.valueOf(42), categories.get(Aggregation.class));
|
||||||
|
assertEquals(Integer.valueOf(3), categories.get(Suggest.Suggestion.class));
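        // (Editorial note, not part of this diff.) The expected 45 entries are 42 aggregation
        // parsers plus the 3 suggestion parsers (term, phrase and completion) registered in
        // getDefaultNamedXContents(), which is exactly what the two category assertions above verify.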
|
||||||
}
|
}
|
||||||
|
|
||||||
private static class TrackingActionListener implements ActionListener<Integer> {
|
private static class TrackingActionListener implements ActionListener<Integer> {
|
||||||
|
|
|
@ -0,0 +1,395 @@
|
||||||
|
/*
|
||||||
|
* Licensed to Elasticsearch under one or more contributor
|
||||||
|
* license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright
|
||||||
|
* ownership. Elasticsearch licenses this file to you under
|
||||||
|
* the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
* not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.elasticsearch.client;
|
||||||
|
|
||||||
|
import org.apache.http.entity.ContentType;
|
||||||
|
import org.apache.http.entity.StringEntity;
|
||||||
|
import org.elasticsearch.ElasticsearchStatusException;
|
||||||
|
import org.elasticsearch.action.search.SearchRequest;
|
||||||
|
import org.elasticsearch.action.search.SearchResponse;
|
||||||
|
import org.elasticsearch.index.query.MatchQueryBuilder;
|
||||||
|
import org.elasticsearch.join.aggregations.Children;
|
||||||
|
import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder;
|
||||||
|
import org.elasticsearch.rest.RestStatus;
|
||||||
|
import org.elasticsearch.search.SearchHit;
|
||||||
|
import org.elasticsearch.search.aggregations.bucket.range.Range;
|
||||||
|
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;
|
||||||
|
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
|
||||||
|
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
|
||||||
|
import org.elasticsearch.search.aggregations.matrix.stats.MatrixStats;
|
||||||
|
import org.elasticsearch.search.aggregations.matrix.stats.MatrixStatsAggregationBuilder;
|
||||||
|
import org.elasticsearch.search.aggregations.support.ValueType;
|
||||||
|
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||||
|
import org.elasticsearch.search.suggest.Suggest;
|
||||||
|
import org.elasticsearch.search.suggest.SuggestBuilder;
|
||||||
|
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
|
||||||
|
import org.junit.Before;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.Collections;
|
||||||
|
|
||||||
|
import static org.hamcrest.Matchers.both;
|
||||||
|
import static org.hamcrest.Matchers.either;
|
||||||
|
import static org.hamcrest.Matchers.equalTo;
|
||||||
|
import static org.hamcrest.Matchers.greaterThan;
|
||||||
|
import static org.hamcrest.Matchers.lessThan;
|
||||||
|
|
||||||
|
public class SearchIT extends ESRestHighLevelClientTestCase {
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void indexDocuments() throws IOException {
|
||||||
|
StringEntity doc1 = new StringEntity("{\"type\":\"type1\", \"num\":10, \"num2\":50}", ContentType.APPLICATION_JSON);
|
||||||
|
client().performRequest("PUT", "/index/type/1", Collections.emptyMap(), doc1);
|
||||||
|
StringEntity doc2 = new StringEntity("{\"type\":\"type1\", \"num\":20, \"num2\":40}", ContentType.APPLICATION_JSON);
|
||||||
|
client().performRequest("PUT", "/index/type/2", Collections.emptyMap(), doc2);
|
||||||
|
StringEntity doc3 = new StringEntity("{\"type\":\"type1\", \"num\":50, \"num2\":35}", ContentType.APPLICATION_JSON);
|
||||||
|
client().performRequest("PUT", "/index/type/3", Collections.emptyMap(), doc3);
|
||||||
|
StringEntity doc4 = new StringEntity("{\"type\":\"type2\", \"num\":100, \"num2\":10}", ContentType.APPLICATION_JSON);
|
||||||
|
client().performRequest("PUT", "/index/type/4", Collections.emptyMap(), doc4);
|
||||||
|
StringEntity doc5 = new StringEntity("{\"type\":\"type2\", \"num\":100, \"num2\":10}", ContentType.APPLICATION_JSON);
|
||||||
|
client().performRequest("PUT", "/index/type/5", Collections.emptyMap(), doc5);
|
||||||
|
client().performRequest("POST", "/index/_refresh");
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testSearchNoQuery() throws IOException {
|
||||||
|
SearchRequest searchRequest = new SearchRequest();
|
||||||
|
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||||
|
assertSearchHeader(searchResponse);
|
||||||
|
assertNull(searchResponse.getAggregations());
|
||||||
|
assertNull(searchResponse.getSuggest());
|
||||||
|
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||||
|
assertEquals(5, searchResponse.getHits().totalHits);
|
||||||
|
assertEquals(5, searchResponse.getHits().getHits().length);
|
||||||
|
for (SearchHit searchHit : searchResponse.getHits().getHits()) {
|
||||||
|
assertEquals("index", searchHit.getIndex());
|
||||||
|
assertEquals("type", searchHit.getType());
|
||||||
|
assertThat(Integer.valueOf(searchHit.getId()), both(greaterThan(0)).and(lessThan(6)));
|
||||||
|
assertEquals(1.0f, searchHit.getScore(), 0);
|
||||||
|
assertEquals(-1L, searchHit.getVersion());
|
||||||
|
assertNotNull(searchHit.getSourceAsMap());
|
||||||
|
assertEquals(3, searchHit.getSourceAsMap().size());
|
||||||
|
assertTrue(searchHit.getSourceAsMap().containsKey("type"));
|
||||||
|
assertTrue(searchHit.getSourceAsMap().containsKey("num"));
|
||||||
|
assertTrue(searchHit.getSourceAsMap().containsKey("num2"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testSearchMatchQuery() throws IOException {
|
||||||
|
SearchRequest searchRequest = new SearchRequest();
|
||||||
|
searchRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10)));
|
||||||
|
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||||
|
assertSearchHeader(searchResponse);
|
||||||
|
assertNull(searchResponse.getAggregations());
|
||||||
|
assertNull(searchResponse.getSuggest());
|
||||||
|
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||||
|
assertEquals(1, searchResponse.getHits().totalHits);
|
||||||
|
assertEquals(1, searchResponse.getHits().getHits().length);
|
||||||
|
assertThat(searchResponse.getHits().getMaxScore(), greaterThan(0f));
|
||||||
|
SearchHit searchHit = searchResponse.getHits().getHits()[0];
|
||||||
|
assertEquals("index", searchHit.getIndex());
|
||||||
|
assertEquals("type", searchHit.getType());
|
||||||
|
assertEquals("1", searchHit.getId());
|
||||||
|
assertThat(searchHit.getScore(), greaterThan(0f));
|
||||||
|
assertEquals(-1L, searchHit.getVersion());
|
||||||
|
assertNotNull(searchHit.getSourceAsMap());
|
||||||
|
assertEquals(3, searchHit.getSourceAsMap().size());
|
||||||
|
assertEquals("type1", searchHit.getSourceAsMap().get("type"));
|
||||||
|
assertEquals(50, searchHit.getSourceAsMap().get("num2"));
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testSearchWithTermsAgg() throws IOException {
|
||||||
|
SearchRequest searchRequest = new SearchRequest();
|
||||||
|
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
|
||||||
|
searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1", ValueType.STRING).field("type.keyword"));
|
||||||
|
searchSourceBuilder.size(0);
|
||||||
|
searchRequest.source(searchSourceBuilder);
|
||||||
|
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||||
|
assertSearchHeader(searchResponse);
|
||||||
|
assertNull(searchResponse.getSuggest());
|
||||||
|
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||||
|
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||||
|
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||||
|
Terms termsAgg = searchResponse.getAggregations().get("agg1");
|
||||||
|
assertEquals("agg1", termsAgg.getName());
|
||||||
|
assertEquals(2, termsAgg.getBuckets().size());
|
||||||
|
Terms.Bucket type1 = termsAgg.getBucketByKey("type1");
|
||||||
|
assertEquals(3, type1.getDocCount());
|
||||||
|
assertEquals(0, type1.getAggregations().asList().size());
|
||||||
|
Terms.Bucket type2 = termsAgg.getBucketByKey("type2");
|
||||||
|
assertEquals(2, type2.getDocCount());
|
||||||
|
assertEquals(0, type2.getAggregations().asList().size());
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testSearchWithRangeAgg() throws IOException {
|
||||||
|
{
|
||||||
|
SearchRequest searchRequest = new SearchRequest();
|
||||||
|
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
|
||||||
|
searchSourceBuilder.aggregation(new RangeAggregationBuilder("agg1").field("num"));
|
||||||
|
searchSourceBuilder.size(0);
|
||||||
|
searchRequest.source(searchSourceBuilder);
|
||||||
|
|
||||||
|
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
|
||||||
|
() -> execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync));
|
||||||
|
assertEquals(RestStatus.BAD_REQUEST, exception.status());
|
||||||
|
}
|
||||||
|
|
||||||
|
SearchRequest searchRequest = new SearchRequest();
|
||||||
|
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
|
||||||
|
searchSourceBuilder.aggregation(new RangeAggregationBuilder("agg1").field("num")
|
||||||
|
.addRange("first", 0, 30).addRange("second", 31, 200));
|
||||||
|
searchSourceBuilder.size(0);
|
||||||
|
searchRequest.source(searchSourceBuilder);
|
||||||
|
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||||
|
assertSearchHeader(searchResponse);
|
||||||
|
assertNull(searchResponse.getSuggest());
|
||||||
|
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||||
|
assertThat(searchResponse.getTook().nanos(), greaterThan(0L));
|
||||||
|
assertEquals(5, searchResponse.getHits().totalHits);
|
||||||
|
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||||
|
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||||
|
Range rangeAgg = searchResponse.getAggregations().get("agg1");
|
||||||
|
assertEquals("agg1", rangeAgg.getName());
|
||||||
|
assertEquals(2, rangeAgg.getBuckets().size());
|
||||||
|
{
|
||||||
|
Range.Bucket bucket = rangeAgg.getBuckets().get(0);
|
||||||
|
assertEquals("first", bucket.getKeyAsString());
|
||||||
|
assertEquals(2, bucket.getDocCount());
|
||||||
|
}
|
||||||
|
{
|
||||||
|
Range.Bucket bucket = rangeAgg.getBuckets().get(1);
|
||||||
|
assertEquals("second", bucket.getKeyAsString());
|
||||||
|
assertEquals(3, bucket.getDocCount());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testSearchWithTermsAndRangeAgg() throws IOException {
|
||||||
|
SearchRequest searchRequest = new SearchRequest();
|
||||||
|
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
|
||||||
|
TermsAggregationBuilder agg = new TermsAggregationBuilder("agg1", ValueType.STRING).field("type.keyword");
|
||||||
|
agg.subAggregation(new RangeAggregationBuilder("subagg").field("num")
|
||||||
|
.addRange("first", 0, 30).addRange("second", 31, 200));
|
||||||
|
searchSourceBuilder.aggregation(agg);
|
||||||
|
searchSourceBuilder.size(0);
|
||||||
|
searchRequest.source(searchSourceBuilder);
|
||||||
|
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||||
|
assertSearchHeader(searchResponse);
|
||||||
|
assertNull(searchResponse.getSuggest());
|
||||||
|
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||||
|
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||||
|
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||||
|
Terms termsAgg = searchResponse.getAggregations().get("agg1");
|
||||||
|
assertEquals("agg1", termsAgg.getName());
|
||||||
|
assertEquals(2, termsAgg.getBuckets().size());
|
||||||
|
Terms.Bucket type1 = termsAgg.getBucketByKey("type1");
|
||||||
|
assertEquals(3, type1.getDocCount());
|
||||||
|
assertEquals(1, type1.getAggregations().asList().size());
|
||||||
|
{
|
||||||
|
Range rangeAgg = type1.getAggregations().get("subagg");
|
||||||
|
assertEquals(2, rangeAgg.getBuckets().size());
|
||||||
|
{
|
||||||
|
Range.Bucket bucket = rangeAgg.getBuckets().get(0);
|
||||||
|
assertEquals("first", bucket.getKeyAsString());
|
||||||
|
assertEquals(2, bucket.getDocCount());
|
||||||
|
}
|
||||||
|
{
|
||||||
|
Range.Bucket bucket = rangeAgg.getBuckets().get(1);
|
||||||
|
assertEquals("second", bucket.getKeyAsString());
|
||||||
|
assertEquals(1, bucket.getDocCount());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Terms.Bucket type2 = termsAgg.getBucketByKey("type2");
|
||||||
|
assertEquals(2, type2.getDocCount());
|
||||||
|
assertEquals(1, type2.getAggregations().asList().size());
|
||||||
|
{
|
||||||
|
Range rangeAgg = type2.getAggregations().get("subagg");
|
||||||
|
assertEquals(2, rangeAgg.getBuckets().size());
|
||||||
|
{
|
||||||
|
Range.Bucket bucket = rangeAgg.getBuckets().get(0);
|
||||||
|
assertEquals("first", bucket.getKeyAsString());
|
||||||
|
assertEquals(0, bucket.getDocCount());
|
||||||
|
}
|
||||||
|
{
|
||||||
|
Range.Bucket bucket = rangeAgg.getBuckets().get(1);
|
||||||
|
assertEquals("second", bucket.getKeyAsString());
|
||||||
|
assertEquals(2, bucket.getDocCount());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testSearchWithMatrixStats() throws IOException {
|
||||||
|
SearchRequest searchRequest = new SearchRequest();
|
||||||
|
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
|
||||||
|
searchSourceBuilder.aggregation(new MatrixStatsAggregationBuilder("agg1").fields(Arrays.asList("num", "num2")));
|
||||||
|
searchSourceBuilder.size(0);
|
||||||
|
searchRequest.source(searchSourceBuilder);
|
||||||
|
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||||
|
assertSearchHeader(searchResponse);
|
||||||
|
assertNull(searchResponse.getSuggest());
|
||||||
|
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||||
|
assertThat(searchResponse.getTook().nanos(), greaterThan(0L));
|
||||||
|
assertEquals(5, searchResponse.getHits().totalHits);
|
||||||
|
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||||
|
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||||
|
assertEquals(1, searchResponse.getAggregations().asList().size());
|
||||||
|
MatrixStats matrixStats = searchResponse.getAggregations().get("agg1");
|
||||||
|
assertEquals(5, matrixStats.getFieldCount("num"));
|
||||||
|
assertEquals(56d, matrixStats.getMean("num"), 0d);
|
||||||
|
assertEquals(1830d, matrixStats.getVariance("num"), 0d);
|
||||||
|
assertEquals(0.09340198804973057, matrixStats.getSkewness("num"), 0d);
|
||||||
|
assertEquals(1.2741646510794589, matrixStats.getKurtosis("num"), 0d);
|
||||||
|
assertEquals(5, matrixStats.getFieldCount("num2"));
|
||||||
|
assertEquals(29d, matrixStats.getMean("num2"), 0d);
|
||||||
|
assertEquals(330d, matrixStats.getVariance("num2"), 0d);
|
||||||
|
assertEquals(-0.13568039346585542, matrixStats.getSkewness("num2"), 0d);
|
||||||
|
assertEquals(1.3517561983471074, matrixStats.getKurtosis("num2"), 0d);
|
||||||
|
assertEquals(-767.5, matrixStats.getCovariance("num", "num2"), 0d);
|
||||||
|
assertEquals(-0.9876336291667923, matrixStats.getCorrelation("num", "num2"), 0d);
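        // (Editorial note, not part of this diff.) The expectations above follow from the five
        // documents indexed in indexDocuments():
        //   num  = {10, 20, 50, 100, 100} -> mean = 280 / 5 = 56,  sample variance = 7320 / 4 = 1830
        //   num2 = {50, 40, 35, 10, 10}   -> mean = 145 / 5 = 29,  sample variance = 1320 / 4 = 330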
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testSearchWithParentJoin() throws IOException {
|
||||||
|
StringEntity parentMapping = new StringEntity("{\n" +
|
||||||
|
" \"mappings\": {\n" +
|
||||||
|
" \"answer\" : {\n" +
|
||||||
|
" \"_parent\" : {\n" +
|
||||||
|
" \"type\" : \"question\"\n" +
|
||||||
|
" }\n" +
|
||||||
|
" }\n" +
|
||||||
|
" },\n" +
|
||||||
|
" \"settings\": {\n" +
|
||||||
|
" \"index.mapping.single_type\": false" +
|
||||||
|
" }\n" +
|
||||||
|
"}", ContentType.APPLICATION_JSON);
|
||||||
|
client().performRequest("PUT", "/child_example", Collections.emptyMap(), parentMapping);
|
||||||
|
StringEntity questionDoc = new StringEntity("{\n" +
|
||||||
|
" \"body\": \"<p>I have Windows 2003 server and i bought a new Windows 2008 server...\",\n" +
|
||||||
|
" \"title\": \"Whats the best way to file transfer my site from server to a newer one?\",\n" +
|
||||||
|
" \"tags\": [\n" +
|
||||||
|
" \"windows-server-2003\",\n" +
|
||||||
|
" \"windows-server-2008\",\n" +
|
||||||
|
" \"file-transfer\"\n" +
|
||||||
|
" ]\n" +
|
||||||
|
"}", ContentType.APPLICATION_JSON);
|
||||||
|
client().performRequest("PUT", "/child_example/question/1", Collections.emptyMap(), questionDoc);
|
||||||
|
StringEntity answerDoc1 = new StringEntity("{\n" +
|
||||||
|
" \"owner\": {\n" +
|
||||||
|
" \"location\": \"Norfolk, United Kingdom\",\n" +
|
||||||
|
" \"display_name\": \"Sam\",\n" +
|
||||||
|
" \"id\": 48\n" +
|
||||||
|
" },\n" +
|
||||||
|
" \"body\": \"<p>Unfortunately you're pretty much limited to FTP...\",\n" +
|
||||||
|
" \"creation_date\": \"2009-05-04T13:45:37.030\"\n" +
|
||||||
|
"}", ContentType.APPLICATION_JSON);
|
||||||
|
client().performRequest("PUT", "child_example/answer/1", Collections.singletonMap("parent", "1"), answerDoc1);
|
||||||
|
StringEntity answerDoc2 = new StringEntity("{\n" +
|
||||||
|
" \"owner\": {\n" +
|
||||||
|
" \"location\": \"Norfolk, United Kingdom\",\n" +
|
||||||
|
" \"display_name\": \"Troll\",\n" +
|
||||||
|
" \"id\": 49\n" +
|
||||||
|
" },\n" +
|
||||||
|
" \"body\": \"<p>Use Linux...\",\n" +
|
||||||
|
" \"creation_date\": \"2009-05-05T13:45:37.030\"\n" +
|
||||||
|
"}", ContentType.APPLICATION_JSON);
|
||||||
|
client().performRequest("PUT", "/child_example/answer/2", Collections.singletonMap("parent", "1"), answerDoc2);
|
||||||
|
client().performRequest("POST", "/_refresh");
|
||||||
|
|
||||||
|
TermsAggregationBuilder leafTermAgg = new TermsAggregationBuilder("top-names", ValueType.STRING)
|
||||||
|
.field("owner.display_name.keyword").size(10);
|
||||||
|
ChildrenAggregationBuilder childrenAgg = new ChildrenAggregationBuilder("to-answers", "answer").subAggregation(leafTermAgg);
|
||||||
|
TermsAggregationBuilder termsAgg = new TermsAggregationBuilder("top-tags", ValueType.STRING).field("tags.keyword")
|
||||||
|
.size(10).subAggregation(childrenAgg);
|
||||||
|
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
|
||||||
|
searchSourceBuilder.size(0).aggregation(termsAgg);
|
||||||
|
SearchRequest searchRequest = new SearchRequest("child_example");
|
||||||
|
searchRequest.source(searchSourceBuilder);
|
||||||
|
|
||||||
|
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||||
|
assertSearchHeader(searchResponse);
|
||||||
|
assertNull(searchResponse.getSuggest());
|
||||||
|
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||||
|
assertThat(searchResponse.getTook().nanos(), greaterThan(0L));
|
||||||
|
assertEquals(3, searchResponse.getHits().totalHits);
|
||||||
|
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||||
|
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||||
|
assertEquals(1, searchResponse.getAggregations().asList().size());
|
||||||
|
Terms terms = searchResponse.getAggregations().get("top-tags");
|
||||||
|
assertEquals(0, terms.getDocCountError());
|
||||||
|
assertEquals(0, terms.getSumOfOtherDocCounts());
|
||||||
|
assertEquals(3, terms.getBuckets().size());
|
||||||
|
for (Terms.Bucket bucket : terms.getBuckets()) {
|
||||||
|
assertThat(bucket.getKeyAsString(),
|
||||||
|
either(equalTo("file-transfer")).or(equalTo("windows-server-2003")).or(equalTo("windows-server-2008")));
|
||||||
|
assertEquals(1, bucket.getDocCount());
|
||||||
|
assertEquals(1, bucket.getAggregations().asList().size());
|
||||||
|
Children children = bucket.getAggregations().get("to-answers");
|
||||||
|
assertEquals(2, children.getDocCount());
|
||||||
|
assertEquals(1, children.getAggregations().asList().size());
|
||||||
|
Terms leafTerms = children.getAggregations().get("top-names");
|
||||||
|
assertEquals(0, leafTerms.getDocCountError());
|
||||||
|
assertEquals(0, leafTerms.getSumOfOtherDocCounts());
|
||||||
|
|
||||||
|
assertEquals(2, leafTerms.getBuckets().size());
|
||||||
|
Terms.Bucket sam = leafTerms.getBucketByKey("Sam");
|
||||||
|
assertEquals(1, sam.getDocCount());
|
||||||
|
Terms.Bucket troll = leafTerms.getBucketByKey("Troll");
|
||||||
|
assertEquals(1, troll.getDocCount());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testSearchWithSuggest() throws IOException {
|
||||||
|
SearchRequest searchRequest = new SearchRequest();
|
||||||
|
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
|
||||||
|
searchSourceBuilder.suggest(new SuggestBuilder().addSuggestion("sugg1", new PhraseSuggestionBuilder("type"))
|
||||||
|
.setGlobalText("type"));
|
||||||
|
searchSourceBuilder.size(0);
|
||||||
|
searchRequest.source(searchSourceBuilder);
|
||||||
|
|
||||||
|
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync);
|
||||||
|
assertSearchHeader(searchResponse);
|
||||||
|
assertNull(searchResponse.getAggregations());
|
||||||
|
assertEquals(Collections.emptyMap(), searchResponse.getProfileResults());
|
||||||
|
assertEquals(0, searchResponse.getHits().totalHits);
|
||||||
|
assertEquals(0f, searchResponse.getHits().getMaxScore(), 0f);
|
||||||
|
assertEquals(0, searchResponse.getHits().getHits().length);
|
||||||
|
assertEquals(1, searchResponse.getSuggest().size());
|
||||||
|
|
||||||
|
Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> sugg = searchResponse
|
||||||
|
.getSuggest().iterator().next();
|
||||||
|
assertEquals("sugg1", sugg.getName());
|
||||||
|
for (Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option> options : sugg) {
|
||||||
|
assertEquals("type", options.getText().string());
|
||||||
|
assertEquals(0, options.getOffset());
|
||||||
|
assertEquals(4, options.getLength());
|
||||||
|
assertEquals(2 ,options.getOptions().size());
|
||||||
|
for (Suggest.Suggestion.Entry.Option option : options) {
|
||||||
|
assertThat(option.getScore(), greaterThan(0f));
|
||||||
|
assertThat(option.getText().string(), either(equalTo("type1")).or(equalTo("type2")));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static void assertSearchHeader(SearchResponse searchResponse) {
|
||||||
|
assertThat(searchResponse.getTook().nanos(), greaterThan(0L));
|
||||||
|
assertEquals(0, searchResponse.getFailedShards());
|
||||||
|
assertThat(searchResponse.getTotalShards(), greaterThan(0));
|
||||||
|
assertEquals(searchResponse.getTotalShards(), searchResponse.getSuccessfulShards());
|
||||||
|
assertEquals(0, searchResponse.getShardFailures().length);
|
||||||
|
}
|
||||||
|
}
|
|
@ -55,27 +55,29 @@ public class PreBuiltTransportClient extends TransportClient {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Netty wants to do some unsafe things like use unsafe and replace a private field. This method disables these things by default, but
|
* Netty wants to do some unwelcome things like use unsafe and replace a private field, or use a poorly considered buffer recycler. This
|
||||||
* can be overridden by setting the corresponding system properties.
|
* method disables these things by default, but can be overridden by setting the corresponding system properties.
|
||||||
*/
|
*/
|
||||||
@SuppressForbidden(reason = "set system properties to configure Netty")
|
|
||||||
private static void initializeNetty() {
|
private static void initializeNetty() {
|
||||||
final String noUnsafeKey = "io.netty.noUnsafe";
|
/*
|
||||||
final String noUnsafe = System.getProperty(noUnsafeKey);
|
* We disable three pieces of Netty functionality here:
|
||||||
if (noUnsafe == null) {
|
* - we disable Netty from being unsafe
|
||||||
// disable Netty from using unsafe
|
* - we disable Netty from replacing the selector key set
|
||||||
// while permissions are needed to set this, if a security exception is thrown the permission needed can either be granted or
|
* - we disable Netty from using the recycler
|
||||||
// the system property can be set directly before starting the JVM; therefore, we do not catch a security exception here
|
*
|
||||||
System.setProperty(noUnsafeKey, Boolean.toString(true));
|
* While permissions are needed to read and set these, the permissions needed here are innocuous and thus should simply be granted
|
||||||
|
* rather than us handling a security exception here.
|
||||||
|
*/
|
||||||
|
setSystemPropertyIfUnset("io.netty.noUnsafe", Boolean.toString(true));
|
||||||
|
setSystemPropertyIfUnset("io.netty.noKeySetOptimization", Boolean.toString(true));
|
||||||
|
setSystemPropertyIfUnset("io.netty.recycler.maxCapacityPerThread", Integer.toString(0));
|
||||||
}
|
}
|
||||||
|
|
||||||
final String noKeySetOptimizationKey = "io.netty.noKeySetOptimization";
|
@SuppressForbidden(reason = "set system properties to configure Netty")
|
||||||
final String noKeySetOptimization = System.getProperty(noKeySetOptimizationKey);
|
private static void setSystemPropertyIfUnset(final String key, final String value) {
|
||||||
if (noKeySetOptimization == null) {
|
final String currentValue = System.getProperty(key);
|
||||||
// disable Netty from replacing the selector key set
|
if (currentValue == null) {
|
||||||
// while permissions are needed to set this, if a security exception is thrown the permission needed can either be granted or
|
System.setProperty(key, value);
|
||||||
// the system property can be set directly before starting the JVM; therefore, we do not catch a security exception here
|
|
||||||
System.setProperty(noKeySetOptimizationKey, Boolean.toString(true));
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
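Because setSystemPropertyIfUnset only writes a property that is not already set, callers who want Netty's optimizations back can set the corresponding properties before the transport client is constructed, or pass them as -D JVM flags. A minimal sketch, illustrative only and not part of this commit; the recycler capacity shown is just an example value:

    // Must run before PreBuiltTransportClient (and therefore Netty) is initialized.
    System.setProperty("io.netty.noUnsafe", "false");                      // re-enable use of Unsafe
    System.setProperty("io.netty.noKeySetOptimization", "false");          // re-enable selector key set replacement
    System.setProperty("io.netty.recycler.maxCapacityPerThread", "32768"); // re-enable the recycler (example capacity)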
@ -277,43 +277,3 @@ if (isEclipse == false || project.path == ":core-tests") {
|
||||||
check.dependsOn integTest
|
check.dependsOn integTest
|
||||||
integTest.mustRunAfter test
|
integTest.mustRunAfter test
|
||||||
}
|
}
|
||||||
|
|
||||||
task('verifyVersions') {
|
|
||||||
  description 'Verifies that all released versions that are index compatible are listed in Version.java.'
|
|
||||||
group 'Verification'
|
|
||||||
enabled = false == gradle.startParameter.isOffline()
|
|
||||||
doLast {
|
|
||||||
// Read the list from maven central
|
|
||||||
Node xml
|
|
||||||
new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
|
|
||||||
xml = new XmlParser().parse(s)
|
|
||||||
}
|
|
||||||
Set<String> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ })
|
|
||||||
|
|
||||||
// Limit the known versions to those that should be wire compatible
|
|
||||||
String currentVersion = versions.elasticsearch.minus('-SNAPSHOT')
|
|
||||||
int prevMajor = Integer.parseInt(currentVersion.split('\\.')[0]) - 1
|
|
||||||
if (prevMajor == 4) {
|
|
||||||
// 4 didn't exist, it was 2.
|
|
||||||
prevMajor = 2;
|
|
||||||
}
|
|
||||||
knownVersions = knownVersions.findAll { Integer.parseInt(it.split('\\.')[0]) >= prevMajor }
|
|
||||||
|
|
||||||
/* Limit the listed versions to those that have been marked as released.
|
|
||||||
* Versions not marked as released don't get the same testing and we want
|
|
||||||
* to make sure that we flip all unreleased versions to released as soon
|
|
||||||
* as possible after release. */
|
|
||||||
Set<String> actualVersions = new TreeSet<>(
|
|
||||||
indexCompatVersions
|
|
||||||
.findAll { false == it.unreleased }
|
|
||||||
.collect { it.toString() })
|
|
||||||
|
|
||||||
// Finally, compare!
|
|
||||||
if (!knownVersions.equals(actualVersions)) {
|
|
||||||
throw new GradleException("out-of-date versions\nActual :" +
|
|
||||||
actualVersions + "\nExpected:" + knownVersions +
|
|
||||||
"; update Version.java")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
check.dependsOn(verifyVersions)
|
|
||||||
|
|
|
@ -1 +0,0 @@
|
||||||
e69234c2e898d86a53edbe8d22e33bebc45286cd
|
|
|
@ -0,0 +1 @@
|
||||||
|
5e191674c50c9d99c9838da52cbf67c411998f4e
|
|
@ -1 +0,0 @@
|
||||||
48172a8e1fe6562f55ab671d42af53652794d5df
|
|
|
@ -0,0 +1 @@
|
||||||
|
45bc34ab640d5d1a7491b523631b902f20db5384
|
|
@ -1 +0,0 @@
|
||||||
3dab251d4c7ab4ff5095e5f1d1e127ec2cf3c07d
|
|
|
@ -0,0 +1 @@
|
||||||
|
b44d86e9077443c3ba4918a85603734461c6b448
|
|
@ -1 +0,0 @@
|
||||||
c01ae8a23b733d75d058a76bd85fcb49b9fd06fd
|
|
|
@ -0,0 +1 @@
|
||||||
|
409b616d40e2041a02890b2dc477ed845e3121e9
|
|
@ -1 +0,0 @@
|
||||||
c53df048b97946fe66035505306b5651b702adb1
|
|
|
@ -0,0 +1 @@
|
||||||
|
cfac105541315e2ca54955f681b410a7aa3bbb9d
|
|
@ -1 +0,0 @@
|
||||||
1ecb349ba29abab75359e5125ac8a94fc81441d5
|
|
|
@ -0,0 +1 @@
|
||||||
|
993c1331130dd26c632b964fd8caac259bb9f3fc
|
|
@ -1 +0,0 @@
|
||||||
e5f53b38652b1284ff254fba39e624ec117aef7d
|
|
|
@ -0,0 +1 @@
|
||||||
|
ec1460a28850410112a6349a7fff27df31242295
|
|
@ -1 +0,0 @@
|
||||||
2f340ed3f46d6b4c89fa31975b675c19028c15eb
|
|
|
@ -0,0 +1 @@
|
||||||
|
57d342dbe68cf05361ccfda6bb76f2410cac900b
|
|
@ -1 +0,0 @@
|
||||||
a13862fb62cc1e516d16d6b6bb3cdb906c4925f6
|
|
|
@ -0,0 +1 @@
|
||||||
|
5ed10847b6a2353ac66decd5a2ee1a1d34353049
|
|
@ -1 +0,0 @@
|
||||||
4e014f72a588453bae7dd1a555d741cf3bf39032
|
|
|
@ -0,0 +1 @@
|
||||||
|
23ce6c2ea59287d8fe4fe31f466e9a58a1efe7b5
|
|
@ -1 +0,0 @@
|
||||||
5e87d61c604d6b1c0ee5c38f09441d1b8b9c8c2b
|
|
|
@ -0,0 +1 @@
|
||||||
|
78bda71c8e65428927136f81112a031aa9cd04d4
|
|
@ -1 +0,0 @@
|
||||||
be14aa163b339403d8ec904493c1be5dfa9baeaf
|
|
|
@ -0,0 +1 @@
|
||||||
|
1e7ea95e6197176015b13551c7496be4867ede45
|
|
@ -1 +0,0 @@
|
||||||
a2c13be0fe4c5a98a30ec6ae673be1442409817c
|
|
|
@ -0,0 +1 @@
|
||||||
|
5ae4ecd6c478456395ae9a3f954b8afc13629bb9
|
|
@ -1 +0,0 @@
|
||||||
92b8282e474845fdae31f9f239f953bc7164401f
|
|
|
@ -0,0 +1 @@
|
||||||
|
d5d1a81fc290b9660a49557f848dc2a3c4f2048b
|
|
@ -1 +0,0 @@
|
||||||
1c4aaea267ed41657ebf01769bfddbcab5b27414
|
|
|
@ -0,0 +1 @@
|
||||||
|
d77cdd8f2782062a3b4c319c64f0fa4d804aafed
|
|
@ -0,0 +1,201 @@
|
||||||
|
/*
|
||||||
|
* Licensed to Elasticsearch under one or more contributor
|
||||||
|
* license agreements. See the NOTICE file distributed with
|
||||||
|
* this work for additional information regarding copyright
|
||||||
|
* ownership. Elasticsearch licenses this file to you under
|
||||||
|
* the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
* not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package org.apache.lucene.analysis.miscellaneous;
|
||||||
|
|
||||||
|
import org.apache.lucene.analysis.FilteringTokenFilter;
|
||||||
|
import org.apache.lucene.analysis.TokenFilter;
|
||||||
|
import org.apache.lucene.analysis.TokenStream;
|
||||||
|
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
|
||||||
|
import org.apache.lucene.util.BytesRef;
|
||||||
|
import org.elasticsearch.common.hash.MurmurHash3;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Inspects token streams for duplicate sequences of tokens. Token sequences
|
||||||
|
* have a minimum length - 6 is a good heuristic as it avoids filtering common
|
||||||
|
* idioms/phrases but detects longer sections that are typical of cut+paste
|
||||||
|
* copies of text.
|
||||||
|
*
|
||||||
|
* <p>
|
||||||
|
* Internally each token is hashed/moduloed into a single byte (so 256 possible
|
||||||
|
* values for each token) and then recorded in a trie of seen byte sequences
|
||||||
|
* using a {@link DuplicateByteSequenceSpotter}. This trie is passed into the
|
||||||
|
* TokenFilter constructor so a single object can be reused across multiple
|
||||||
|
* documents.
|
||||||
|
*
|
||||||
|
* <p>
|
||||||
|
* The emitDuplicates setting controls if duplicate tokens are filtered from
|
||||||
|
* results or are output (the {@link DuplicateSequenceAttribute} attribute can
|
||||||
|
* be used to inspect the number of prior sightings when emitDuplicates is true)
|
||||||
|
*/
|
||||||
|
public class DeDuplicatingTokenFilter extends FilteringTokenFilter {
|
||||||
|
private final DuplicateSequenceAttribute seqAtt = addAttribute(DuplicateSequenceAttribute.class);
|
||||||
|
private final boolean emitDuplicates;
|
||||||
|
static final MurmurHash3.Hash128 seed = new MurmurHash3.Hash128();
|
||||||
|
|
||||||
|
public DeDuplicatingTokenFilter(TokenStream in, DuplicateByteSequenceSpotter byteStreamDuplicateSpotter) {
|
||||||
|
this(in, byteStreamDuplicateSpotter, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* @param in
|
||||||
|
* The input token stream
|
||||||
|
* @param byteStreamDuplicateSpotter
|
||||||
|
* object which retains trie of token sequences
|
||||||
|
* @param emitDuplicates
|
||||||
|
* true if duplicate tokens are to be emitted (use
|
||||||
|
* {@link DuplicateSequenceAttribute} attribute to inspect number
|
||||||
|
* of prior sightings of tokens as part of a sequence).
|
||||||
|
*/
|
||||||
|
public DeDuplicatingTokenFilter(TokenStream in, DuplicateByteSequenceSpotter byteStreamDuplicateSpotter, boolean emitDuplicates) {
|
||||||
|
super(new DuplicateTaggingFilter(byteStreamDuplicateSpotter, in));
|
||||||
|
this.emitDuplicates = emitDuplicates;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected boolean accept() throws IOException {
|
||||||
|
return emitDuplicates || seqAtt.getNumPriorUsesInASequence() < 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static class DuplicateTaggingFilter extends TokenFilter {
|
||||||
|
private final DuplicateSequenceAttribute seqAtt = addAttribute(DuplicateSequenceAttribute.class);
|
||||||
|
|
||||||
|
TermToBytesRefAttribute termBytesAtt = addAttribute(TermToBytesRefAttribute.class);
|
||||||
|
private DuplicateByteSequenceSpotter byteStreamDuplicateSpotter;
|
||||||
|
private ArrayList<State> allTokens;
|
||||||
|
int pos = 0;
|
||||||
|
private final int windowSize;
|
||||||
|
|
||||||
|
protected DuplicateTaggingFilter(DuplicateByteSequenceSpotter byteStreamDuplicateSpotter, TokenStream input) {
|
||||||
|
super(input);
|
||||||
|
this.byteStreamDuplicateSpotter = byteStreamDuplicateSpotter;
|
||||||
|
this.windowSize = DuplicateByteSequenceSpotter.TREE_DEPTH;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public final boolean incrementToken() throws IOException {
|
||||||
|
if (allTokens == null) {
|
||||||
|
loadAllTokens();
|
||||||
|
}
|
||||||
|
clearAttributes();
|
||||||
|
if (pos < allTokens.size()) {
|
||||||
|
State earlierToken = allTokens.get(pos);
|
||||||
|
pos++;
|
||||||
|
restoreState(earlierToken);
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void loadAllTokens() throws IOException {
|
||||||
|
// TODO consider changing this implementation to emit tokens as-we-go
|
||||||
|
// rather than buffering all. However this array is perhaps not the
|
||||||
|
// bulk of memory usage (in practice the dupSequenceSpotter requires
|
||||||
|
// ~5x the original content size in its internal tree ).
|
||||||
|
allTokens = new ArrayList<State>(256);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Given the bytes 123456123456 and a duplicate sequence size of 6
|
||||||
|
* the byteStreamDuplicateSpotter will only flag the final byte as
|
||||||
|
* part of a duplicate sequence due to the byte-at-a-time streaming
|
||||||
|
* nature of its assessments. When this happens we retain a buffer
|
||||||
|
* of the last 6 tokens so that we can mark the states of prior
|
||||||
|
* tokens (bytes 7 to 11) as also being duplicates
|
||||||
|
*/
|
||||||
|
|
||||||
|
pos = 0;
|
||||||
|
boolean isWrapped = false;
|
||||||
|
State priorStatesBuffer[] = new State[windowSize];
|
||||||
|
short priorMaxNumSightings[] = new short[windowSize];
|
||||||
|
int cursor = 0;
|
||||||
|
while (input.incrementToken()) {
|
||||||
|
BytesRef bytesRef = termBytesAtt.getBytesRef();
|
||||||
|
long tokenHash = MurmurHash3.hash128(bytesRef.bytes, bytesRef.offset, bytesRef.length, 0, seed).h1;
|
||||||
|
byte tokenByte = (byte) (tokenHash & 0xFF);
|
||||||
|
short numSightings = byteStreamDuplicateSpotter.addByte(tokenByte);
|
||||||
|
priorStatesBuffer[cursor] = captureState();
|
||||||
|
// Revise prior captured State objects if the latest
|
||||||
|
// token is marked as a duplicate
|
||||||
|
if (numSightings >= 1) {
|
||||||
|
int numLengthsToRecord = windowSize;
|
||||||
|
int pos = cursor;
|
||||||
|
while (numLengthsToRecord > 0) {
|
||||||
|
if (pos < 0) {
|
||||||
|
pos = windowSize - 1;
|
||||||
|
}
|
||||||
|
priorMaxNumSightings[pos] = (short) Math.max(priorMaxNumSightings[pos], numSightings);
|
||||||
|
numLengthsToRecord--;
|
||||||
|
pos--;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Reposition cursor to next free slot
|
||||||
|
cursor++;
|
||||||
|
if (cursor >= windowSize) {
|
||||||
|
// wrap around the buffer
|
||||||
|
cursor = 0;
|
||||||
|
isWrapped = true;
|
||||||
|
}
|
||||||
|
// clean out the end of the tail that we may overwrite if the
|
||||||
|
// next iteration adds a new head
|
||||||
|
if (isWrapped) {
|
||||||
|
// tokenPos is now positioned on tail - emit any valid
|
||||||
|
// tokens we may about to overwrite in the next iteration
|
||||||
|
if (priorStatesBuffer[cursor] != null) {
|
||||||
|
recordLengthInfoState(priorMaxNumSightings, priorStatesBuffer, cursor);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} // end loop reading all tokens from stream
|
||||||
|
|
||||||
|
// Flush the buffered tokens
|
||||||
|
int pos = isWrapped ? nextAfter(cursor) : 0;
|
||||||
|
while (pos != cursor) {
|
||||||
|
recordLengthInfoState(priorMaxNumSightings, priorStatesBuffer, pos);
|
||||||
|
pos = nextAfter(pos);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private int nextAfter(int pos) {
|
||||||
|
pos++;
|
||||||
|
if (pos >= windowSize) {
|
||||||
|
pos = 0;
|
||||||
|
}
|
||||||
|
return pos;
|
||||||
|
}
|
||||||
|
|
||||||
|
private void recordLengthInfoState(short[] maxNumSightings, State[] tokenStates, int cursor) {
|
||||||
|
if (maxNumSightings[cursor] > 0) {
|
||||||
|
// We need to patch in the max sequence length we recorded at
|
||||||
|
// this position into the token state
|
||||||
|
restoreState(tokenStates[cursor]);
|
||||||
|
seqAtt.setNumPriorUsesInASequence(maxNumSightings[cursor]);
|
||||||
|
maxNumSightings[cursor] = 0;
|
||||||
|
// record the patched state
|
||||||
|
tokenStates[cursor] = captureState();
|
||||||
|
}
|
||||||
|
allTokens.add(tokenStates[cursor]);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
|
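A minimal sketch of how the new filter might be wired into an analysis chain (illustrative only, not part of this commit). It assumes DuplicateByteSequenceSpotter exposes a no-arg constructor and shares a single spotter across documents, as the class Javadoc above suggests:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.miscellaneous.DeDuplicatingTokenFilter;
    import org.apache.lucene.analysis.miscellaneous.DuplicateByteSequenceSpotter;

    public class DeDuplicatingAnalyzer extends Analyzer {
        // One spotter shared across documents so repeated passages are recognized between them.
        private final DuplicateByteSequenceSpotter spotter = new DuplicateByteSequenceSpotter();

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer source = new WhitespaceTokenizer();
            // Default mode (emitDuplicates == false) drops tokens that are part of an already-seen sequence.
            TokenStream result = new DeDuplicatingTokenFilter(source, spotter);
            return new TokenStreamComponents(source, result);
        }
    }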
@ -0,0 +1,311 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.lucene.analysis.miscellaneous;

import org.apache.lucene.util.RamUsageEstimator;

/**
 * A Trie structure for analysing byte streams for duplicate sequences. Bytes
 * from a stream are added one at a time using the addByte method and the number
 * of times it has been seen as part of a sequence is returned.
 *
 * The minimum required length for a duplicate sequence detected is 6 bytes.
 *
 * The design goals are to maximize speed of lookup while minimizing the space
 * required to do so. This has led to a hybrid solution for representing the
 * bytes that make up a sequence in the trie.
 *
 * If we have 6 bytes in sequence e.g. abcdef then they are represented as
 * object nodes in the tree as follows:
 * <p>
 * (a)-(b)-(c)-(def as an int)
 * <p>
 *
 * {@link RootTreeNode} objects are used for the first two levels of the tree
 * (representing bytes a and b in the example sequence). The combinations of
 * objects at these 2 levels are few so internally these objects allocate an
 * array of 256 child node objects to quickly address children by indexing
 * directly into the densely packed array using a byte value. The third level in
 * the tree holds {@link LightweightTreeNode} nodes that have few children
 * (typically much less than 256) and so use a dynamically-grown array to hold
 * child nodes as simple int primitives. These ints represent the final 3 bytes
 * of a sequence and also hold a count of the number of times the entire sequence
 * path has been visited (count is a single byte).
 * <p>
 * The Trie grows indefinitely as more content is added and while theoretically
 * it could be massive (a 6-depth tree could produce 256^6 nodes) non-random
 * content e.g. English text contains fewer variations.
 * <p>
 * In future we may look at using one of these strategies when memory is tight:
 * <ol>
 * <li>auto-pruning methods to remove less-visited parts of the tree
 * <li>auto-reset to wipe the whole tree and restart when a memory threshold is
 * reached
 * <li>halting any growth of the tree
 * </ol>
 *
 * Tests on real-world-text show that the size of the tree is a multiple of the
 * input text where that multiplier varies between 10 and 5 times as the content
 * size increased from 10 to 100 megabytes of content.
 *
 */
public class DuplicateByteSequenceSpotter {
    public static final int TREE_DEPTH = 6;
    // The maximum number of repetitions that are counted
    public static final int MAX_HIT_COUNT = 255;
    private final TreeNode root;
    private boolean sequenceBufferFilled = false;
    private final byte[] sequenceBuffer = new byte[TREE_DEPTH];
    private int nextFreePos = 0;

    // ==Performance info
    private final int[] nodesAllocatedByDepth;
    private int nodesResizedByDepth;
    // ==== RAM usage estimation settings ====
    private long bytesAllocated;
    // Root node object plus inner-class reference to containing "this"
    // (profiler suggested this was a cost)
    static final long TREE_NODE_OBJECT_SIZE = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_OBJECT_REF;
    // A TreeNode specialization with an array ref (dynamically allocated and
    // fixed-size)
    static final long ROOT_TREE_NODE_OBJECT_SIZE = TREE_NODE_OBJECT_SIZE + RamUsageEstimator.NUM_BYTES_OBJECT_REF;
    // A KeyedTreeNode specialization with an array ref (dynamically allocated
    // and grown)
    static final long LIGHTWEIGHT_TREE_NODE_OBJECT_SIZE = TREE_NODE_OBJECT_SIZE + RamUsageEstimator.NUM_BYTES_OBJECT_REF;
    // A KeyedTreeNode specialization with a short-based hit count and a
    // sequence of bytes encoded as an int
    static final long LEAF_NODE_OBJECT_SIZE = TREE_NODE_OBJECT_SIZE + Short.BYTES + Integer.BYTES;

    public DuplicateByteSequenceSpotter() {
        this.nodesAllocatedByDepth = new int[4];
        this.bytesAllocated = 0;
        root = new RootTreeNode((byte) 1, null, 0);
    }

    /**
     * Reset the sequence detection logic to avoid any continuation of the
     * immediately previous bytes. A minimum of dupSequenceSize bytes need to be
     * added before any new duplicate sequences will be reported.
     * Hit counts are not reset by calling this method.
     */
    public void startNewSequence() {
        sequenceBufferFilled = false;
        nextFreePos = 0;
    }

    /**
     * Add a byte to the sequence.
     * @param b
     *            the next byte in a sequence
     * @return number of times this byte and the preceding 6 bytes have been
     *         seen before as a sequence (only counts up to 255)
     *
     */
    public short addByte(byte b) {
        // Add latest byte to circular buffer
        sequenceBuffer[nextFreePos] = b;
        nextFreePos++;
        if (nextFreePos >= sequenceBuffer.length) {
            nextFreePos = 0;
            sequenceBufferFilled = true;
        }
        if (sequenceBufferFilled == false) {
            return 0;
        }
        TreeNode node = root;
        // replay updated sequence of bytes represented in the circular
        // buffer starting from the tail
        int p = nextFreePos;

        // The first tier of nodes are addressed using individual bytes from the
        // sequence
        node = node.add(sequenceBuffer[p], 0);
        p = nextBufferPos(p);
        node = node.add(sequenceBuffer[p], 1);
        p = nextBufferPos(p);
        node = node.add(sequenceBuffer[p], 2);

        // The final 3 bytes in the sequence are represented in an int
        // where the 4th byte will contain a hit count.

        p = nextBufferPos(p);
        int sequence = 0xFF & sequenceBuffer[p];
        p = nextBufferPos(p);
        sequence = sequence << 8 | (0xFF & sequenceBuffer[p]);
        p = nextBufferPos(p);
        sequence = sequence << 8 | (0xFF & sequenceBuffer[p]);
        return (short) (node.add(sequence << 8) - 1);
    }

    private int nextBufferPos(int p) {
        p++;
        if (p >= sequenceBuffer.length) {
            p = 0;
        }
        return p;
    }

    /**
     * Base class for nodes in the tree. Subclasses are optimised for use at
     * different locations in the tree - speed-optimized nodes represent
     * branches near the root while space-optimized nodes are used for deeper
     * leaves/branches.
     */
    abstract class TreeNode {

        TreeNode(byte key, TreeNode parentNode, int depth) {
            nodesAllocatedByDepth[depth]++;
        }

        public abstract TreeNode add(byte b, int depth);

        /**
         *
         * @param byteSequence
         *            a sequence of bytes encoded as an int
         * @return the number of times the full sequence has been seen (counting
         *         up to a maximum of 32767).
         */
        public abstract short add(int byteSequence);
    }

    // Node implementation for use at the root of the tree that sacrifices space
    // for speed.
    class RootTreeNode extends TreeNode {

        // A null-or-256 sized array that can be indexed into using a byte for
        // fast access.
        // Being near the root of the tree it is expected that this is a
        // non-sparse array.
        TreeNode[] children;

        RootTreeNode(byte key, TreeNode parentNode, int depth) {
            super(key, parentNode, depth);
            bytesAllocated += ROOT_TREE_NODE_OBJECT_SIZE;
        }

        public TreeNode add(byte b, int depth) {
            if (children == null) {
                children = new TreeNode[256];
                bytesAllocated += (RamUsageEstimator.NUM_BYTES_OBJECT_REF * 256);
            }
            int bIndex = 0xFF & b;
            TreeNode node = children[bIndex];
            if (node == null) {
                if (depth <= 1) {
                    // Depths 0 and 1 use RootTreeNode impl and create
                    // RootTreeNodeImpl children
                    node = new RootTreeNode(b, this, depth);
                } else {
                    // Deeper-level nodes are less visited but more numerous
                    // so use a more space-friendly data structure
                    node = new LightweightTreeNode(b, this, depth);
                }
                children[bIndex] = node;
            }
            return node;
        }

        @Override
        public short add(int byteSequence) {
            throw new UnsupportedOperationException("Root nodes do not support byte sequences encoded as integers");
        }

    }

    // Node implementation for use by the depth 3 branches of the tree that
    // sacrifices speed for space.
    final class LightweightTreeNode extends TreeNode {

        // An array dynamically resized but frequently only sized 1 as most
        // sequences leading to end leaves are one-off paths.
        // It is scanned for matches sequentially and benchmarks showed
        // that sorting contents on insertion didn't improve performance.
        int[] children = null;

        LightweightTreeNode(byte key, TreeNode parentNode, int depth) {
            super(key, parentNode, depth);
            bytesAllocated += LIGHTWEIGHT_TREE_NODE_OBJECT_SIZE;

        }

        @Override
        public short add(int byteSequence) {
            if (children == null) {
                // Create array adding new child with the byte sequence combined with hitcount of 1.
                // Most nodes at this level we expect to have only 1 child so we start with the
                // smallest possible child array.
                children = new int[1];
                bytesAllocated += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + Integer.BYTES;
                children[0] = byteSequence + 1;
                return 1;
            }
            // Find existing child and if discovered increment count
            for (int i = 0; i < children.length; i++) {
                int child = children[i];
                if (byteSequence == (child & 0xFFFFFF00)) {
                    int hitCount = child & 0xFF;
                    if (hitCount < MAX_HIT_COUNT) {
                        children[i]++;
                    }
                    return (short) (hitCount + 1);
                }
            }
            // Grow array adding new child
            int[] newChildren = new int[children.length + 1];
            bytesAllocated += Integer.BYTES;

            System.arraycopy(children, 0, newChildren, 0, children.length);
            children = newChildren;
            // Combine the byte sequence with a hit count of 1 into an int.
            children[newChildren.length - 1] = byteSequence + 1;
            nodesResizedByDepth++;
            return 1;
        }

        @Override
        public TreeNode add(byte b, int depth) {
            throw new UnsupportedOperationException("Leaf nodes do not take byte sequences");
        }

    }

    public final long getEstimatedSizeInBytes() {
        return bytesAllocated;
    }

    /**
     * @return Performance info - the number of nodes allocated at each depth
     */
    public int[] getNodesAllocatedByDepth() {
        return nodesAllocatedByDepth.clone();
    }

    /**
     * @return Performance info - the number of resizing of children arrays, at
     *         each depth
     */
    public int getNodesResizedByDepth() {
        return nodesResizedByDepth;
    }

}
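For orientation, a minimal usage sketch of the class above (the input string and variable names are illustrative and not part of this commit): each call to addByte() advances the 6-byte window and reports how many times that exact window has already been recorded in the trie.

    import java.nio.charset.StandardCharsets;
    import org.apache.lucene.analysis.miscellaneous.DuplicateByteSequenceSpotter;

    public class SpotterDemo {
        public static void main(String[] args) {
            DuplicateByteSequenceSpotter spotter = new DuplicateByteSequenceSpotter();
            byte[] bytes = "abcdefabcdef".getBytes(StandardCharsets.UTF_8);
            for (byte b : bytes) {
                short priorSightings = spotter.addByte(b);
                // 0 for windows seen for the first time; the repeated "abcdef"
                // window reports a non-zero count because it recurs.
                System.out.println((char) b + " -> " + priorSightings);
            }
            // Rough RAM cost of the trie built so far
            System.out.println(spotter.getEstimatedSizeInBytes() + " bytes");
        }
    }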
@ -17,18 +17,19 @@
 * under the License.
 */

apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

/* This project runs the core REST tests against a 2 node cluster where one of the nodes has a different minor. */
integTest {
  includePackaged = true
}

integTestCluster {
  numNodes = 4
  numBwcNodes = 2
  bwcVersion = project.wireCompatVersions[-1]
  setting 'logger.org.elasticsearch', 'DEBUG'
}

package org.apache.lucene.analysis.miscellaneous;

import org.apache.lucene.util.Attribute;

/**
 * Provides statistics useful for detecting duplicate sections of text
 */
public interface DuplicateSequenceAttribute extends Attribute {
    /**
     * @return The number of times this token has been seen previously as part
     *         of a sequence (counts to a max of 255)
     */
    short getNumPriorUsesInASequence();

    void setNumPriorUsesInASequence(short len);
}
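For orientation, this is how an analysis chain would normally read the new attribute off a TokenStream; only the DuplicateSequenceAttribute interface above comes from this commit, the surrounding consumer class is illustrative.

    import java.io.IOException;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.miscellaneous.DuplicateSequenceAttribute;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    final class DuplicateAwareConsumer {
        void consume(TokenStream stream) throws IOException {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            DuplicateSequenceAttribute dup = stream.addAttribute(DuplicateSequenceAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                if (dup.getNumPriorUsesInASequence() > 0) {
                    // this token is part of a byte sequence that was seen before
                    System.out.println("duplicate token: " + term);
                }
            }
            stream.end();
            stream.close();
        }
    }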
@ -0,0 +1,53 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.lucene.analysis.miscellaneous;

import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;

public class DuplicateSequenceAttributeImpl extends AttributeImpl implements DuplicateSequenceAttribute {
    protected short numPriorUsesInASequence = 0;

    @Override
    public void clear() {
        numPriorUsesInASequence = 0;
    }

    @Override
    public void copyTo(AttributeImpl target) {
        DuplicateSequenceAttributeImpl t = (DuplicateSequenceAttributeImpl) target;
        t.numPriorUsesInASequence = numPriorUsesInASequence;
    }

    @Override
    public short getNumPriorUsesInASequence() {
        return numPriorUsesInASequence;
    }

    @Override
    public void setNumPriorUsesInASequence(short len) {
        numPriorUsesInASequence = len;
    }

    @Override
    public void reflectWith(AttributeReflector reflector) {
        reflector.reflect(DuplicateSequenceAttribute.class, "sequenceLength", numPriorUsesInASequence);
    }
}
@ -0,0 +1,47 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch;

/**
 * Provides a static final field that can be used to check if assertions are enabled. Since this field might be used elsewhere to check if
 * assertions are enabled, if you are running with assertions enabled for specific packages or classes, you should enable assertions on this
 * class too (e.g., {@code -ea org.elasticsearch.Assertions -ea org.elasticsearch.cluster.service.MasterService}).
 */
public final class Assertions {

    private Assertions() {

    }

    public static final boolean ENABLED;

    static {
        boolean enabled = false;
        /*
         * If assertions are enabled, the following line will be evaluated and enabled will have the value true, otherwise when assertions
         * are disabled enabled will have the value false.
         */
        // noinspection ConstantConditions,AssertWithSideEffects
        assert enabled = true;
        // noinspection ConstantConditions
        ENABLED = enabled;
    }

}
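For orientation, the trick above relies on the assert statement only being executed when the JVM was started with -ea, so the assignment `enabled = true` is skipped otherwise. Callers can then gate expensive checks on the flag; this small demo class is illustrative and not part of the commit.

    import org.elasticsearch.Assertions;

    public final class AssertionsDemo {
        public static void main(String[] args) {
            if (Assertions.ENABLED) {
                // Reached only when the JVM runs with -ea for this class/package;
                // expensive invariant checks can be gated here at no production cost.
                System.out.println("assertions are enabled");
            } else {
                System.out.println("assertions are disabled");
            }
        }
    }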
@ -877,8 +877,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
                 org.elasticsearch.transport.ReceiveTimeoutTransportException::new, 83, UNKNOWN_VERSION_ADDED),
         NODE_DISCONNECTED_EXCEPTION(org.elasticsearch.transport.NodeDisconnectedException.class,
                 org.elasticsearch.transport.NodeDisconnectedException::new, 84, UNKNOWN_VERSION_ADDED),
-        ALREADY_EXPIRED_EXCEPTION(org.elasticsearch.index.AlreadyExpiredException.class,
-                org.elasticsearch.index.AlreadyExpiredException::new, 85, UNKNOWN_VERSION_ADDED),
+        // 85 used to be for AlreadyExpiredException
         AGGREGATION_EXECUTION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationExecutionException.class,
                 org.elasticsearch.search.aggregations.AggregationExecutionException::new, 86, UNKNOWN_VERSION_ADDED),
         // 87 used to be for MergeMappingException
@ -74,15 +74,17 @@ public class Version implements Comparable<Version> {
     public static final Version V_5_3_2 = new Version(V_5_3_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_2);
     public static final int V_5_4_0_ID = 5040099;
     public static final Version V_5_4_0 = new Version(V_5_4_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0);
-    public static final int V_5_5_0_ID_UNRELEASED = 5050099;
-    public static final Version V_5_5_0_UNRELEASED = new Version(V_5_5_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
-    public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
-    public static final Version V_6_0_0_alpha1_UNRELEASED =
-        new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_7_0_0);
-    public static final int V_6_0_0_alpha2_ID_UNRELEASED = 6000002;
-    public static final Version V_6_0_0_alpha2_UNRELEASED =
-        new Version(V_6_0_0_alpha2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_7_0_0);
-    public static final Version CURRENT = V_6_0_0_alpha2_UNRELEASED;
+    public static final int V_5_4_1_ID = 5040199;
+    public static final Version V_5_4_1 = new Version(V_5_4_1_ID, org.apache.lucene.util.Version.LUCENE_6_5_1);
+    public static final int V_5_5_0_ID = 5050099;
+    public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_1);
+    public static final int V_6_0_0_alpha1_ID = 6000001;
+    public static final Version V_6_0_0_alpha1 =
+        new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
+    public static final int V_6_0_0_alpha2_ID = 6000002;
+    public static final Version V_6_0_0_alpha2 =
+        new Version(V_6_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
+    public static final Version CURRENT = V_6_0_0_alpha2;

     // unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)

@ -97,12 +99,14 @@ public class Version implements Comparable<Version> {

     public static Version fromId(int id) {
         switch (id) {
-            case V_6_0_0_alpha2_ID_UNRELEASED:
-                return V_6_0_0_alpha2_UNRELEASED;
-            case V_6_0_0_alpha1_ID_UNRELEASED:
-                return V_6_0_0_alpha1_UNRELEASED;
-            case V_5_5_0_ID_UNRELEASED:
-                return V_5_5_0_UNRELEASED;
+            case V_6_0_0_alpha2_ID:
+                return V_6_0_0_alpha2;
+            case V_6_0_0_alpha1_ID:
+                return V_6_0_0_alpha1;
+            case V_5_5_0_ID:
+                return V_5_5_0;
+            case V_5_4_1_ID:
+                return V_5_4_1;
             case V_5_4_0_ID:
                 return V_5_4_0;
             case V_5_3_2_ID:
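The numeric ids above follow Elasticsearch's usual encoding of major * 1,000,000 + minor * 10,000 + revision * 100 + build, so the new V_5_4_1_ID of 5040199 is 5.4.1 with build id 99, and 6000001 is 6.0.0 with build id 01 (alpha1). A quick check of that arithmetic (the demo class itself is illustrative):

    public final class VersionIdDemo {
        public static void main(String[] args) {
            int major = 5, minor = 4, revision = 1, build = 99;
            System.out.println(major * 1_000_000 + minor * 10_000 + revision * 100 + build); // 5040199 == V_5_4_1_ID
            System.out.println(6 * 1_000_000 + 1);                                           // 6000001 == V_6_0_0_alpha1_ID
        }
    }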
@ -261,7 +261,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
         type = in.readString();
         id = in.readString();
         version = in.readZLong();
-        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             seqNo = in.readZLong();
             primaryTerm = in.readVLong();
         } else {
@ -279,7 +279,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
         out.writeString(type);
         out.writeString(id);
         out.writeZLong(version);
-        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             out.writeZLong(seqNo);
             out.writeVLong(primaryTerm);
         }
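The renames in this and the following hunks all touch the same wire-compatibility pattern: fields that only exist on newer nodes are read and written behind a stream-version check so mixed-version clusters keep working. A generic sketch of the pattern, with a made-up ExampleResponse and extraField (StreamInput, StreamOutput and Version are the real Elasticsearch classes shown in the hunks):

    import java.io.IOException;
    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    class ExampleResponse {
        private String legacyField;
        private long extraField;

        public void readFrom(StreamInput in) throws IOException {
            legacyField = in.readString();
            if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
                extraField = in.readZLong();   // only sent by 6.0+ nodes
            } else {
                extraField = -1L;              // default when talking to older peers
            }
        }

        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(legacyField);
            if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
                out.writeZLong(extraField);    // never sent to pre-6.0 nodes
            }
        }
    }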
@ -79,7 +79,7 @@ public class ClusterStateResponse extends ActionResponse {
         super.readFrom(in);
         clusterName = new ClusterName(in);
         clusterState = ClusterState.readFrom(in, null);
-        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             totalCompressedSize = new ByteSizeValue(in);
         } else {
             // in a mixed cluster, if a pre 6.0 node processes the get cluster state
@ -95,7 +95,7 @@ public class ClusterStateResponse extends ActionResponse {
         super.writeTo(out);
         clusterName.writeTo(out);
         clusterState.writeTo(out);
-        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             totalCompressedSize.writeTo(out);
         }
     }
@ -492,7 +492,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
         for (int i = 0; i < size; i++) {
             final String type = in.readString();
             String source = in.readString();
-            if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO change to 5.3.0 after backport
+            if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO change to 5.3.0 after backport
                 // we do not know the content type that comes from earlier versions so we autodetect and convert
                 source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source));
             }
@ -308,7 +308,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
         indicesOptions = IndicesOptions.readIndicesOptions(in);
         type = in.readOptionalString();
         source = in.readString();
-        if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO change to V_5_3 once backported
+        if (in.getVersion().before(Version.V_5_3_0)) {
             // we do not know the format from earlier versions so convert if necessary
             source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source));
         }
@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.indices.shards;

 import com.carrotsearch.hppc.cursors.IntObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionResponse;
@ -34,7 +35,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.index.shard.ShardStateMetaData;

 import java.io.IOException;
 import java.util.ArrayList;
@ -165,7 +165,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
         @Override
         public void readFrom(StreamInput in) throws IOException {
             node = new DiscoveryNode(in);
-            if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
                 // legacy version
                 in.readLong();
             }
@ -179,7 +179,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             node.writeTo(out);
-            if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
                 // legacy version
                 out.writeLong(-1L);
             }
@ -104,7 +104,7 @@ public class ShardStats implements Streamable, Writeable, ToXContent {
         statePath = in.readString();
         dataPath = in.readString();
         isCustomDataPath = in.readBoolean();
-        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             seqNoStats = in.readOptionalWriteable(SeqNoStats::new);
         }
     }
@ -117,7 +117,7 @@ public class ShardStats implements Streamable, Writeable, ToXContent {
         out.writeString(statePath);
         out.writeString(dataPath);
         out.writeBoolean(isCustomDataPath);
-        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             out.writeOptionalWriteable(seqNoStats);
         }
     }
@ -475,7 +475,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
         cause = in.readString();
         name = in.readString();

-        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             indexPatterns = in.readList(StreamInput::readString);
         } else {
             indexPatterns = Collections.singletonList(in.readString());
@ -487,7 +487,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
         for (int i = 0; i < size; i++) {
             final String type = in.readString();
             String mappingSource = in.readString();
-            if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO change to V_5_3_0 once backported
+            if (in.getVersion().before(Version.V_5_3_0)) {
                 // we do not know the incoming type so convert it if needed
                 mappingSource =
                     XContentHelper.convertToJson(new BytesArray(mappingSource), false, false, XContentFactory.xContentType(mappingSource));
@ -512,7 +512,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
         super.writeTo(out);
         out.writeString(cause);
         out.writeString(name);
-        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             out.writeStringList(indexPatterns);
         } else {
             out.writeString(indexPatterns.size() > 0 ? indexPatterns.get(0) : "");
@ -78,7 +78,7 @@ public class BulkItemRequest implements Streamable {
         if (in.readBoolean()) {
             primaryResponse = BulkItemResponse.readBulkItem(in);
         }
-        if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
+        if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO remove once backported
             boolean ignoreOnReplica = in.readBoolean();
             if (ignoreOnReplica == false && primaryResponse != null) {
                 assert primaryResponse.isFailed() == false : "expected no failure on the primary response";
@ -89,7 +89,7 @@ public class BulkItemRequest implements Streamable {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVInt(id);
-        if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
+        if (out.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO remove once backported
             // old nodes expect updated version and version type on the request
             if (primaryResponse != null) {
                 request.version(primaryResponse.getVersion());
@ -102,7 +102,7 @@ public class BulkItemRequest implements Streamable {
             DocWriteRequest.writeDocumentRequest(out, request);
         }
         out.writeOptionalStreamable(primaryResponse);
-        if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
+        if (out.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO remove once backported
             if (primaryResponse != null) {
                 out.writeBoolean(primaryResponse.isFailed()
                     || primaryResponse.getResponse().getResult() == DocWriteResponse.Result.NOOP);
@ -211,7 +211,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
             id = in.readOptionalString();
             cause = in.readException();
             status = ExceptionsHelper.status(cause);
-            if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
                 seqNo = in.readZLong();
             } else {
                 seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
@ -224,7 +224,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
             out.writeString(getType());
             out.writeOptionalString(getId());
             out.writeException(getCause());
-            if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
                 out.writeZLong(getSeqNo());
             }
         }
@ -54,6 +54,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndexClosedException;
 import org.elasticsearch.ingest.IngestService;
@ -144,6 +145,11 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
         // Attempt to create all the indices that we're going to need during the bulk before we start.
         // Step 1: collect all the indices in the request
         final Set<String> indices = bulkRequest.requests.stream()
+            // delete requests should not attempt to create the index (if the index does not
+            // exists), unless an external versioning is used
+            .filter(request -> request.opType() != DocWriteRequest.OpType.DELETE
+                    || request.versionType() == VersionType.EXTERNAL
+                    || request.versionType() == VersionType.EXTERNAL_GTE)
             .map(DocWriteRequest::index)
             .collect(Collectors.toSet());
         /* Step 2: filter that to indices that don't exist and we can create. At the same time build a map of indices we can't create
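The filter added above means a plain delete in a bulk request no longer triggers auto-creation of a missing index, while a delete that carries external versioning still does, so the versioned tombstone can be stored. A client-side sketch of the second case (index, type and id values are made up):

    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.action.delete.DeleteRequest;
    import org.elasticsearch.index.VersionType;

    class ExternallyVersionedDelete {
        static BulkRequest build() {
            BulkRequest bulk = new BulkRequest();
            // Per the change above, this delete still allows the missing index to be
            // auto-created because it uses external versioning; a plain delete would not.
            bulk.add(new DeleteRequest("my-index", "my-type", "doc-1")
                    .versionType(VersionType.EXTERNAL)
                    .version(42L));
            return bulk;
        }
    }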
@ -23,7 +23,6 @@ import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.delete.DeleteRequest;
@ -31,8 +30,8 @@ import org.elasticsearch.action.delete.DeleteResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.replication.ReplicationOperation;
 import org.elasticsearch.action.support.TransportActions;
+import org.elasticsearch.action.support.replication.ReplicationOperation;
 import org.elasticsearch.action.support.replication.ReplicationResponse.ShardInfo;
 import org.elasticsearch.action.support.replication.TransportWriteAction;
 import org.elasticsearch.action.update.UpdateHelper;
@ -56,7 +55,6 @@ import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.Mapping;
 import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.seqno.SequenceNumbersService;
@ -414,13 +412,6 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         FAILURE
     }

-    static {
-        assert Version.CURRENT.minimumCompatibilityVersion().after(Version.V_6_0_0_alpha1_UNRELEASED) == false:
-                "Remove logic handling NoOp result from primary response; see TODO in replicaItemExecutionMode" +
-                        " as the current minimum compatible version [" +
-                        Version.CURRENT.minimumCompatibilityVersion() + "] is after 6.0";
-    }
-
     /**
      * Determines whether a bulk item request should be executed on the replica.
      * @return {@link ReplicaItemExecutionMode#NORMAL} upon normal primary execution with no failures
@ -436,10 +427,11 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                     ? ReplicaItemExecutionMode.FAILURE // we have a seq no generated with the failure, replicate as no-op
                     : ReplicaItemExecutionMode.NOOP; // no seq no generated, ignore replication
         } else {
-            // NOTE: write requests originating from pre-6.0 nodes can send a no-op operation to
-            // the replica; we ignore replication
-            // TODO: remove noOp result check from primary response, when pre-6.0 nodes are not supported
-            // we should return ReplicationItemExecutionMode.NORMAL instead
+            // TODO: once we know for sure that every operation that has been processed on the primary is assigned a seq#
+            // (i.e., all nodes on the cluster are on v6.0.0 or higher) we can use the existence of a seq# to indicate whether
+            // an operation should be processed or be treated as a noop. This means we could remove this method and the
+            // ReplicaItemExecutionMode enum and have a simple boolean check for seq != UNASSIGNED_SEQ_NO which will work for
+            // both failures and indexing operations.
             return primaryResponse.getResponse().getResult() != DocWriteResponse.Result.NOOP
                     ? ReplicaItemExecutionMode.NORMAL // execution successful on primary
                     : ReplicaItemExecutionMode.NOOP; // ignore replication
@ -454,6 +446,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ

     public static Translog.Location performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception {
         Translog.Location location = null;
+        final long primaryTerm = request.primaryTerm();
         for (int i = 0; i < request.items().length; i++) {
             BulkItemRequest item = request.items()[i];
             final Engine.Result operationResult;
@ -465,10 +458,12 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                 switch (docWriteRequest.opType()) {
                     case CREATE:
                     case INDEX:
-                        operationResult = executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, replica);
+                        operationResult =
+                                executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, primaryTerm, replica);
                         break;
                     case DELETE:
-                        operationResult = executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, replica);
+                        operationResult =
+                                executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, primaryTerm, replica);
                         break;
                     default:
                         throw new IllegalStateException("Unexpected request operation type on replica: "
@ -536,14 +531,12 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
      * Execute the given {@link IndexRequest} on a replica shard, throwing a
      * {@link RetryOnReplicaException} if the operation needs to be re-tried.
      */
-    private static Engine.IndexResult executeIndexRequestOnReplica(
-            DocWriteResponse primaryResponse,
-            IndexRequest request,
-            IndexShard replica) throws IOException {
-
+    private static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse, IndexRequest request,
+            long primaryTerm, IndexShard replica) throws IOException {
         final Engine.Index operation;
         try {
-            operation = prepareIndexOperationOnReplica(primaryResponse, request, replica);
+            operation = prepareIndexOperationOnReplica(primaryResponse, request, primaryTerm, replica);
         } catch (MapperParsingException e) {
             return new Engine.IndexResult(e, primaryResponse.getVersion(), primaryResponse.getSeqNo());
         }
@ -561,6 +554,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
     static Engine.Index prepareIndexOperationOnReplica(
             DocWriteResponse primaryResponse,
             IndexRequest request,
+            long primaryTerm,
             IndexShard replica) {

         final ShardId shardId = replica.shardId();
@ -573,7 +567,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
         assert versionType.validateVersionForWrites(version);

-        return replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType,
+        return replica.prepareIndexOnReplica(sourceToParse, seqNo, primaryTerm, version, versionType,
                 request.getAutoGeneratedTimestamp(), request.isRetry());
     }

@ -655,7 +649,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
     }

     private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteResponse primaryResponse, DeleteRequest request,
-                                                                     IndexShard replica) throws Exception {
+                                                                     final long primaryTerm, IndexShard replica) throws Exception {
         if (replica.indexSettings().isSingleType()) {
             // We need to wait for the replica to have the mappings
             Mapping update;
@ -675,7 +669,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         final long version = primaryResponse.getVersion();
         assert versionType.validateVersionForWrites(version);
         final Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(),
-                primaryResponse.getSeqNo(), request.primaryTerm(), version, versionType);
+                primaryResponse.getSeqNo(), primaryTerm, version, versionType);
         return replica.delete(delete);
     }

@ -78,7 +78,7 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         fields = in.readStringArray();
-        if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_5_0)) {
             indices = in.readStringArray();
             indicesOptions = IndicesOptions.readIndicesOptions(in);
             mergeResults = in.readBoolean();
@ -91,7 +91,7 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeStringArray(fields);
-        if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
             out.writeStringArray(indices);
             indicesOptions.writeIndicesOptions(out);
             out.writeBoolean(mergeResults);
@ -86,7 +86,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
         super.readFrom(in);
         this.responseMap =
             in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField);
-        if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+        if (in.getVersion().onOrAfter(Version.V_5_5_0)) {
             indexResponses = in.readList(FieldCapabilitiesIndexResponse::new);
         } else {
             indexResponses = Collections.emptyList();
@ -101,7 +101,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField);
-        if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+        if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
             out.writeList(indexResponses);
         }
@ -118,7 +118,9 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
         for (Map.Entry<String, OriginalIndices> remoteIndices : remoteClusterIndices.entrySet()) {
             String clusterAlias = remoteIndices.getKey();
             OriginalIndices originalIndices = remoteIndices.getValue();
-            Transport.Connection connection = remoteClusterService.getConnection(remoteIndices.getKey());
+            // if we are connected this is basically a no-op, if we are not we try to connect in parallel in a non-blocking fashion
+            remoteClusterService.ensureConnected(clusterAlias, ActionListener.wrap(v -> {
+                Transport.Connection connection = remoteClusterService.getConnection(clusterAlias);
             FieldCapabilitiesRequest remoteRequest = new FieldCapabilitiesRequest();
             remoteRequest.setMergeResults(false); // we need to merge on this node
             remoteRequest.indicesOptions(originalIndices.indicesOptions());
@ -126,6 +128,7 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
             remoteRequest.fields(request.fields());
             transportService.sendRequest(connection, FieldCapabilitiesAction.NAME, remoteRequest, TransportRequestOptions.EMPTY,
                 new TransportResponseHandler<FieldCapabilitiesResponse>() {

                     @Override
                     public FieldCapabilitiesResponse newInstance() {
                         return new FieldCapabilitiesResponse();
@ -133,12 +136,15 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie

                     @Override
                     public void handleResponse(FieldCapabilitiesResponse response) {
+                        try {
                         for (FieldCapabilitiesIndexResponse res : response.getIndexResponses()) {
-                            indexResponses.add(new FieldCapabilitiesIndexResponse(RemoteClusterAware.buildRemoteIndexName(clusterAlias,
-                                res.getIndexName()), res.get()));
+                            indexResponses.add(new FieldCapabilitiesIndexResponse(RemoteClusterAware.
+                                buildRemoteIndexName(clusterAlias, res.getIndexName()), res.get()));
                         }
+                        } finally {
                         onResponse.run();
                         }
+                        }

                     @Override
                     public void handleException(TransportException exp) {
@ -150,6 +156,7 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
                         return ThreadPool.Names.SAME;
                     }
                 });
+            }, e -> onResponse.run()));
         }

     }
@@ -523,7 +523,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
 id = in.readOptionalString();
 routing = in.readOptionalString();
 parent = in.readOptionalString();
-if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
 in.readOptionalString(); // timestamp
 in.readOptionalWriteable(TimeValue::new); // ttl
 }
@@ -548,7 +548,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
 out.writeOptionalString(id);
 out.writeOptionalString(routing);
 out.writeOptionalString(parent);
-if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
 // Serialize a fake timestamp. 5.x expect this value to be set by the #process method so we can't use null.
 // On the other hand, indices created on 5.x do not index the timestamp field. Therefore passing a 0 (or any value) for
 // the transport layer OK as it will be ignored.
@@ -23,6 +23,9 @@ import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -31,7 +34,7 @@ import java.util.List;

 import static org.elasticsearch.action.ValidateActions.addValidationError;

-public class ClearScrollRequest extends ActionRequest {
+public class ClearScrollRequest extends ActionRequest implements ToXContentObject {

 private List<String> scrollIds;

@@ -83,4 +86,47 @@ public class ClearScrollRequest extends ActionRequest
 }
 }

+@Override
+public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+builder.startObject();
+builder.startArray("scroll_id");
+for (String scrollId : scrollIds) {
+builder.value(scrollId);
+}
+builder.endArray();
+builder.endObject();
+return builder;
+}
+
+public void fromXContent(XContentParser parser) throws IOException {
+scrollIds = null;
+if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
+throw new IllegalArgumentException("Malformed content, must start with an object");
+} else {
+XContentParser.Token token;
+String currentFieldName = null;
+while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+if (token == XContentParser.Token.FIELD_NAME) {
+currentFieldName = parser.currentName();
+} else if ("scroll_id".equals(currentFieldName)){
+if (token == XContentParser.Token.START_ARRAY) {
+while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+if (token.isValue() == false) {
+throw new IllegalArgumentException("scroll_id array element should only contain scroll_id");
+}
+addScrollId(parser.text());
+}
+} else {
+if (token.isValue() == false) {
+throw new IllegalArgumentException("scroll_id element should only contain scroll_id");
+}
+addScrollId(parser.text());
+}
+} else {
+throw new IllegalArgumentException("Unknown parameter [" + currentFieldName
++ "] in request body or parameter is of the wrong type[" + token + "] ");
+}
+}
+}
+}
 }
@@ -20,19 +20,34 @@
 package org.elasticsearch.action.search;

 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;

+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
 import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
 import static org.elasticsearch.rest.RestStatus.OK;

 public class ClearScrollResponse extends ActionResponse implements StatusToXContentObject {

+private static final ParseField SUCCEEDED = new ParseField("succeeded");
+private static final ParseField NUMFREED = new ParseField("num_freed");
+
+private static final ConstructingObjectParser<ClearScrollResponse, Void> PARSER = new ConstructingObjectParser<>("clear_scroll",
+true, a -> new ClearScrollResponse((boolean)a[0], (int)a[1]));
+static {
+PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SUCCEEDED, ObjectParser.ValueType.BOOLEAN);
+PARSER.declareField(constructorArg(), (parser, context) -> parser.intValue(), NUMFREED, ObjectParser.ValueType.INT);
+}
+
 private boolean succeeded;
 private int numFreed;

@@ -67,12 +82,19 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
 @Override
 public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
 builder.startObject();
-builder.field(Fields.SUCCEEDED, succeeded);
-builder.field(Fields.NUMFREED, numFreed);
+builder.field(SUCCEEDED.getPreferredName(), succeeded);
+builder.field(NUMFREED.getPreferredName(), numFreed);
 builder.endObject();
 return builder;
 }

+/**
+ * Parse the clear scroll response body into a new {@link ClearScrollResponse} object
+ */
+public static ClearScrollResponse fromXContent(XContentParser parser) throws IOException {
+return PARSER.apply(parser, null);
+}
+
 @Override
 public void readFrom(StreamInput in) throws IOException {
 super.readFrom(in);
@@ -86,9 +108,4 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
 out.writeBoolean(succeeded);
 out.writeVInt(numFreed);
 }

-static final class Fields {
-static final String SUCCEEDED = "succeeded";
-static final String NUMFREED = "num_freed";
-}
 }
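Note: a small usage sketch for the ConstructingObjectParser registered above. The JSON literal and the parser setup are assumptions for illustration only; only ClearScrollResponse.fromXContent comes from this change.

    // Hypothetical round-trip: parse an assumed clear-scroll response body with the new PARSER.
    String json = "{\"succeeded\":true,\"num_freed\":3}";
    try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, json)) {
        ClearScrollResponse parsed = ClearScrollResponse.fromXContent(parser);
        assert parsed.isSucceeded();       // bound to constructor argument a[0]
        assert parsed.getNumFreed() == 3;  // bound to constructor argument a[1]
    }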
@@ -32,6 +32,7 @@ import org.elasticsearch.search.collapse.CollapseBuilder;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.List;
 import java.util.function.Function;

 /**
@@ -59,7 +60,7 @@ final class ExpandSearchPhase extends SearchPhase {
 final SearchRequest searchRequest = context.getRequest();
 return searchRequest.source() != null &&
 searchRequest.source().collapse() != null &&
-searchRequest.source().collapse().getInnerHit() != null;
+searchRequest.source().collapse().getInnerHits().isEmpty() == false;
 }

 @Override
@@ -67,6 +68,7 @@ final class ExpandSearchPhase extends SearchPhase {
 if (isCollapseRequest() && searchResponse.getHits().getHits().length > 0) {
 SearchRequest searchRequest = context.getRequest();
 CollapseBuilder collapseBuilder = searchRequest.source().collapse();
+final List<InnerHitBuilder> innerHitBuilders = collapseBuilder.getInnerHits();
 MultiSearchRequest multiRequest = new MultiSearchRequest();
 if (collapseBuilder.getMaxConcurrentGroupRequests() > 0) {
 multiRequest.maxConcurrentSearchRequests(collapseBuilder.getMaxConcurrentGroupRequests());
@@ -83,17 +85,20 @@ final class ExpandSearchPhase extends SearchPhase {
 if (origQuery != null) {
 groupQuery.must(origQuery);
 }
-SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(collapseBuilder.getInnerHit())
+for (InnerHitBuilder innerHitBuilder : innerHitBuilders) {
+SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(innerHitBuilder)
 .query(groupQuery);
 SearchRequest groupRequest = new SearchRequest(searchRequest.indices())
 .types(searchRequest.types())
 .source(sourceBuilder);
 multiRequest.add(groupRequest);
 }
+}
 context.getSearchTransport().sendExecuteMultiSearch(multiRequest, context.getTask(),
 ActionListener.wrap(response -> {
 Iterator<MultiSearchResponse.Item> it = response.iterator();
 for (SearchHit hit : searchResponse.getHits()) {
+for (InnerHitBuilder innerHitBuilder : innerHitBuilders) {
 MultiSearchResponse.Item item = it.next();
 if (item.isFailure()) {
 context.onPhaseFailure(this, "failed to expand hits", item.getFailure());
@@ -101,9 +106,10 @@ final class ExpandSearchPhase extends SearchPhase {
 }
 SearchHits innerHits = item.getResponse().getHits();
 if (hit.getInnerHits() == null) {
-hit.setInnerHits(new HashMap<>(1));
+hit.setInnerHits(new HashMap<>(innerHitBuilders.size()));
+}
+hit.getInnerHits().put(innerHitBuilder.getName(), innerHits);
 }
-hit.getInnerHits().put(collapseBuilder.getInnerHit().getName(), innerHits);
 }
 context.executeNextPhase(this, nextPhaseFactory.apply(searchResponse));
 }, context::onFailure)
@@ -21,32 +21,46 @@ package org.elasticsearch.action.search;

 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.StatusToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.action.RestActions;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.internal.InternalSearchResponse;
 import org.elasticsearch.search.profile.ProfileShardResult;
+import org.elasticsearch.search.profile.SearchProfileShardResults;
 import org.elasticsearch.search.suggest.Suggest;

 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;

 import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
-import static org.elasticsearch.search.internal.InternalSearchResponse.readInternalSearchResponse;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;

 /**
 * A response of a search request.
 */
 public class SearchResponse extends ActionResponse implements StatusToXContentObject {

-private InternalSearchResponse internalResponse;
+private static final ParseField SCROLL_ID = new ParseField("_scroll_id");
+private static final ParseField TOOK = new ParseField("took");
+private static final ParseField TIMED_OUT = new ParseField("timed_out");
+private static final ParseField TERMINATED_EARLY = new ParseField("terminated_early");
+private static final ParseField NUM_REDUCE_PHASES = new ParseField("num_reduce_phases");
+
+private SearchResponseSections internalResponse;
+
 private String scrollId;

@@ -61,7 +75,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
 public SearchResponse() {
 }

-public SearchResponse(InternalSearchResponse internalResponse, String scrollId, int totalShards, int successfulShards,
+public SearchResponse(SearchResponseSections internalResponse, String scrollId, int totalShards, int successfulShards,
 long tookInMillis, ShardSearchFailure[] shardFailures) {
 this.internalResponse = internalResponse;
 this.scrollId = scrollId;
@@ -176,7 +190,8 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
 *
 * @return The profile results or an empty map
 */
-@Nullable public Map<String, ProfileShardResult> getProfileResults() {
+@Nullable
+public Map<String, ProfileShardResult> getProfileResults() {
 return internalResponse.profile();
 }

@@ -190,15 +205,15 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb

 public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
 if (scrollId != null) {
-builder.field("_scroll_id", scrollId);
+builder.field(SCROLL_ID.getPreferredName(), scrollId);
 }
-builder.field("took", tookInMillis);
+builder.field(TOOK.getPreferredName(), tookInMillis);
-builder.field("timed_out", isTimedOut());
+builder.field(TIMED_OUT.getPreferredName(), isTimedOut());
 if (isTerminatedEarly() != null) {
-builder.field("terminated_early", isTerminatedEarly());
+builder.field(TERMINATED_EARLY.getPreferredName(), isTerminatedEarly());
 }
 if (getNumReducePhases() != 1) {
-builder.field("num_reduce_phases", getNumReducePhases());
+builder.field(NUM_REDUCE_PHASES.getPreferredName(), getNumReducePhases());
 }
 RestActions.buildBroadcastShardsHeader(builder, params, getTotalShards(), getSuccessfulShards(), getFailedShards(),
 getShardFailures());
@@ -206,10 +221,89 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
 return builder;
 }

+public static SearchResponse fromXContent(XContentParser parser) throws IOException {
+ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
+XContentParser.Token token;
+String currentFieldName = null;
+SearchHits hits = null;
+Aggregations aggs = null;
+Suggest suggest = null;
+SearchProfileShardResults profile = null;
+boolean timedOut = false;
+Boolean terminatedEarly = null;
+int numReducePhases = 1;
+long tookInMillis = -1;
+int successfulShards = -1;
+int totalShards = -1;
+String scrollId = null;
+List<ShardSearchFailure> failures = new ArrayList<>();
+while((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+if (token == XContentParser.Token.FIELD_NAME) {
+currentFieldName = parser.currentName();
+} else if (token.isValue()) {
+if (SCROLL_ID.match(currentFieldName)) {
+scrollId = parser.text();
+} else if (TOOK.match(currentFieldName)) {
+tookInMillis = parser.longValue();
+} else if (TIMED_OUT.match(currentFieldName)) {
+timedOut = parser.booleanValue();
+} else if (TERMINATED_EARLY.match(currentFieldName)) {
+terminatedEarly = parser.booleanValue();
+} else if (NUM_REDUCE_PHASES.match(currentFieldName)) {
+numReducePhases = parser.intValue();
+} else {
+throwUnknownField(currentFieldName, parser.getTokenLocation());
+}
+} else if (token == XContentParser.Token.START_OBJECT) {
+if (SearchHits.Fields.HITS.equals(currentFieldName)) {
+hits = SearchHits.fromXContent(parser);
+} else if (Aggregations.AGGREGATIONS_FIELD.equals(currentFieldName)) {
+aggs = Aggregations.fromXContent(parser);
+} else if (Suggest.NAME.equals(currentFieldName)) {
+suggest = Suggest.fromXContent(parser);
+} else if (SearchProfileShardResults.PROFILE_FIELD.equals(currentFieldName)) {
+profile = SearchProfileShardResults.fromXContent(parser);
+} else if (RestActions._SHARDS_FIELD.match(currentFieldName)) {
+while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+if (token == XContentParser.Token.FIELD_NAME) {
+currentFieldName = parser.currentName();
+} else if (token.isValue()) {
+if (RestActions.FAILED_FIELD.match(currentFieldName)) {
+parser.intValue(); // we don't need it but need to consume it
+} else if (RestActions.SUCCESSFUL_FIELD.match(currentFieldName)) {
+successfulShards = parser.intValue();
+} else if (RestActions.TOTAL_FIELD.match(currentFieldName)) {
+totalShards = parser.intValue();
+} else {
+throwUnknownField(currentFieldName, parser.getTokenLocation());
+}
+} else if (token == XContentParser.Token.START_ARRAY) {
+if (RestActions.FAILURES_FIELD.match(currentFieldName)) {
+while((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+failures.add(ShardSearchFailure.fromXContent(parser));
+}
+} else {
+throwUnknownField(currentFieldName, parser.getTokenLocation());
+}
+} else {
+throwUnknownToken(token, parser.getTokenLocation());
+}
+}
+} else {
+throwUnknownField(currentFieldName, parser.getTokenLocation());
+}
+}
+}
+SearchResponseSections searchResponseSections = new SearchResponseSections(hits, aggs, suggest, timedOut, terminatedEarly,
+profile, numReducePhases);
+return new SearchResponse(searchResponseSections, scrollId, totalShards, successfulShards, tookInMillis,
+failures.toArray(new ShardSearchFailure[failures.size()]));
+}

 @Override
 public void readFrom(StreamInput in) throws IOException {
 super.readFrom(in);
-internalResponse = readInternalSearchResponse(in);
+internalResponse = new InternalSearchResponse(in);
 totalShards = in.readVInt();
 successfulShards = in.readVInt();
 int size = in.readVInt();
@@ -0,0 +1,122 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.SearchHits;
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.profile.ProfileShardResult;
+import org.elasticsearch.search.profile.SearchProfileShardResults;
+import org.elasticsearch.search.suggest.Suggest;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * Base class that holds the various sections which a search response is
+ * composed of (hits, aggs, suggestions etc.) and allows to retrieve them.
+ *
+ * The reason why this class exists is that the high level REST client uses its own classes
+ * to parse aggregations into, which are not serializable. This is the common part that can be
+ * shared between core and client.
+ */
+public class SearchResponseSections implements ToXContent {
+
+protected final SearchHits hits;
+protected final Aggregations aggregations;
+protected final Suggest suggest;
+protected final SearchProfileShardResults profileResults;
+protected final boolean timedOut;
+protected final Boolean terminatedEarly;
+protected final int numReducePhases;
+
+public SearchResponseSections(SearchHits hits, Aggregations aggregations, Suggest suggest, boolean timedOut, Boolean terminatedEarly,
+SearchProfileShardResults profileResults, int numReducePhases) {
+this.hits = hits;
+this.aggregations = aggregations;
+this.suggest = suggest;
+this.profileResults = profileResults;
+this.timedOut = timedOut;
+this.terminatedEarly = terminatedEarly;
+this.numReducePhases = numReducePhases;
+}
+
+public final boolean timedOut() {
+return this.timedOut;
+}
+
+public final Boolean terminatedEarly() {
+return this.terminatedEarly;
+}
+
+public final SearchHits hits() {
+return hits;
+}
+
+public final Aggregations aggregations() {
+return aggregations;
+}
+
+public final Suggest suggest() {
+return suggest;
+}
+
+/**
+ * Returns the number of reduce phases applied to obtain this search response
+ */
+public final int getNumReducePhases() {
+return numReducePhases;
+}
+
+/**
+ * Returns the profile results for this search response (including all shards).
+ * An empty map is returned if profiling was not enabled
+ *
+ * @return Profile results
+ */
+public final Map<String, ProfileShardResult> profile() {
+if (profileResults == null) {
+return Collections.emptyMap();
+}
+return profileResults.getShardResults();
+}
+
+@Override
+public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+hits.toXContent(builder, params);
+if (aggregations != null) {
+aggregations.toXContent(builder, params);
+}
+if (suggest != null) {
+suggest.toXContent(builder, params);
+}
+if (profileResults != null) {
+profileResults.toXContent(builder, params);
+}
+return builder;
+}
+
+protected void writeTo(StreamOutput out) throws IOException {
+throw new UnsupportedOperationException();
+}
+}
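Note: a minimal sketch of how the new sections class composes with the SearchResponse constructor changed above. The empty-hits setup and numeric arguments are assumptions for illustration; on the transport side the serializable InternalSearchResponse subclass is still what gets written to the wire.

    // Build a response from already-parsed sections rather than from the stream format.
    SearchResponseSections sections = new SearchResponseSections(
        new SearchHits(new SearchHit[0], 0, 0f), // hits
        null,                                    // aggregations (may be client-side, non-serializable impls)
        null,                                    // suggest
        false,                                   // timedOut
        null,                                    // terminatedEarly
        null,                                    // profile results
        1);                                      // numReducePhases
    SearchResponse response = new SearchResponse(sections, null, 1, 1, 100, ShardSearchFailure.EMPTY_ARRAY);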
@@ -24,6 +24,9 @@ import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.Scroll;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskId;
@@ -33,7 +36,7 @@ import java.util.Objects;

 import static org.elasticsearch.action.ValidateActions.addValidationError;

-public class SearchScrollRequest extends ActionRequest {
+public class SearchScrollRequest extends ActionRequest implements ToXContentObject {

 private String scrollId;
 private Scroll scroll;
@@ -145,4 +148,39 @@ public class SearchScrollRequest extends ActionRequest
 return "scrollId[" + scrollId + "], scroll[" + scroll + "]";
 }

+@Override
+public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+builder.startObject();
+builder.field("scroll_id", scrollId);
+if (scroll != null) {
+builder.field("scroll", scroll.keepAlive().getStringRep());
+}
+builder.endObject();
+return builder;
+}
+
+/**
+ * Parse a search scroll request from a request body provided through the REST layer.
+ * Values that are already be set and are also found while parsing will be overridden.
+ */
+public void fromXContent(XContentParser parser) throws IOException {
+if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
+throw new IllegalArgumentException("Malformed content, must start with an object");
+} else {
+XContentParser.Token token;
+String currentFieldName = null;
+while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+if (token == XContentParser.Token.FIELD_NAME) {
+currentFieldName = parser.currentName();
+} else if ("scroll_id".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) {
+scrollId(parser.text());
+} else if ("scroll".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) {
+scroll(new Scroll(TimeValue.parseTimeValue(parser.text(), null, "scroll")));
+} else {
+throw new IllegalArgumentException("Unknown parameter [" + currentFieldName
++ "] in request body or parameter is of the wrong type[" + token + "] ");
+}
+}
+}
+}
 }
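Note: a short sketch of the request body the new toXContent/fromXContent pair round-trips. The scroll id literal is a placeholder and the builder setup is an assumption; the request API itself is taken from the hunk above.

    // Render the body of a scroll continuation request, e.g. {"scroll_id":"<id>","scroll":"1m"}.
    SearchScrollRequest scrollRequest = new SearchScrollRequest("<scroll id from a previous response>");
    scrollRequest.scroll(new Scroll(TimeValue.timeValueMinutes(1)));
    XContentBuilder body = scrollRequest.toXContent(JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS);
    // fromXContent(parser) accepts the same shape and overrides any values already set on the request.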
@@ -38,7 +38,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.AllocationId;
 import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
@@ -52,7 +51,6 @@ import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
-import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.shard.IndexShard;
@@ -179,8 +177,8 @@ public abstract class TransportReplicationAction<
 Request shardRequest, IndexShard primary) throws Exception;

 /**
- * Synchronous replica operation on nodes with replica copies. This is done under the lock form
- * {@link IndexShard#acquireReplicaOperationLock(long, ActionListener, String)}
+ * Synchronously execute the specified replica operation. This is done under a permit from
+ * {@link IndexShard#acquireReplicaOperationPermit(long, ActionListener, String)}.
 *
 * @param shardRequest the request to the replica shard
 * @param replica the replica shard to perform the operation on
@@ -584,7 +582,7 @@ public abstract class TransportReplicationAction<
 throw new ShardNotFoundException(this.replica.shardId(), "expected aID [{}] but found [{}]", targetAllocationID,
 actualAllocationId);
 }
-replica.acquireReplicaOperationLock(request.primaryTerm, this, executor);
+replica.acquireReplicaOperationPermit(request.primaryTerm, this, executor);
 }

 /**
@@ -921,7 +919,7 @@ public abstract class TransportReplicationAction<
 }
 };

-indexShard.acquirePrimaryOperationLock(onAcquired, executor);
+indexShard.acquirePrimaryOperationPermit(onAcquired, executor);
 }

 class ShardReference implements Releasable {
@@ -1013,7 +1011,7 @@ public abstract class TransportReplicationAction<

 @Override
 public void readFrom(StreamInput in) throws IOException {
-if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
 super.readFrom(in);
 localCheckpoint = in.readZLong();
 allocationId = in.readString();
@@ -1024,7 +1022,7 @@ public abstract class TransportReplicationAction<

 @Override
 public void writeTo(StreamOutput out) throws IOException {
-if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
 super.writeTo(out);
 out.writeZLong(localCheckpoint);
 out.writeString(allocationId);
@@ -1193,7 +1191,7 @@ public abstract class TransportReplicationAction<
 @Override
 public void readFrom(StreamInput in) throws IOException {
 super.readFrom(in);
-if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
 globalCheckpoint = in.readZLong();
 }
 }
@@ -1201,7 +1199,7 @@ public abstract class TransportReplicationAction<
 @Override
 public void writeTo(StreamOutput out) throws IOException {
 super.writeTo(out);
-if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
 out.writeZLong(globalCheckpoint);
 }
 }
@@ -44,10 +44,8 @@ import org.elasticsearch.index.mapper.ParentFieldMapper;
 import org.elasticsearch.index.mapper.RoutingFieldMapper;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.script.CompiledScript;
 import org.elasticsearch.script.ExecutableScript;
 import org.elasticsearch.script.Script;
-import org.elasticsearch.script.ScriptContext;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 import org.elasticsearch.search.lookup.SourceLookup;
@@ -301,8 +299,8 @@ public class UpdateHelper extends AbstractComponent {
 private Map<String, Object> executeScript(Script script, Map<String, Object> ctx) {
 try {
 if (scriptService != null) {
-CompiledScript compiledScript = scriptService.compile(script, ScriptContext.Standard.UPDATE);
-ExecutableScript executableScript = scriptService.executable(compiledScript, script.getParams());
+ExecutableScript.Factory factory = scriptService.compile(script, ExecutableScript.UPDATE_CONTEXT);
+ExecutableScript executableScript = factory.newInstance(script.getParams());
 executableScript.setNextVar(ContextFields.CTX, ctx);
 executableScript.run();
 }
@@ -138,7 +138,7 @@ public class ClusterBlock implements Streamable, ToXContent {
 retryable = in.readBoolean();
 disableStatePersistence = in.readBoolean();
 status = RestStatus.readFrom(in);
-if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+if (in.getVersion().onOrAfter(Version.V_5_5_0)) {
 allowReleaseResources = in.readBoolean();
 } else {
 allowReleaseResources = false;
@@ -156,7 +156,7 @@ public class ClusterBlock implements Streamable, ToXContent {
 out.writeBoolean(retryable);
 out.writeBoolean(disableStatePersistence);
 RestStatus.writeTo(out, status);
-if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
+if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
 out.writeBoolean(allowReleaseResources);
 }
 }
@@ -210,7 +210,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
 public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException {
 Builder builder = new Builder(in.readString());
 builder.order(in.readInt());
-if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
 builder.patterns(in.readList(StreamInput::readString));
 } else {
 builder.patterns(Collections.singletonList(in.readString()));
@@ -245,7 +245,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
 public void writeTo(StreamOutput out) throws IOException {
 out.writeString(name);
 out.writeInt(order);
-if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
+if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
 out.writeStringList(patterns);
 } else {
 out.writeString(patterns.size() > 0 ? patterns.get(0) : "");
@@ -196,7 +196,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
 source().writeTo(out);
 // routing
 out.writeBoolean(routing().required());
-if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
 // timestamp
 out.writeBoolean(false); // enabled
 out.writeString(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format());
@@ -233,7 +233,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
 source = CompressedXContent.readCompressedString(in);
 // routing
 routing = new Routing(in.readBoolean());
-if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
 // timestamp
 boolean enabled = in.readBoolean();
 if (enabled) {
@@ -19,6 +19,7 @@

 package org.elasticsearch.cluster.routing;

+import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -177,10 +178,20 @@ public class OperationRouting extends AbstractComponent {
 }
 }
 // if not, then use it as the index
+int routingHash = Murmur3HashFunction.hash(preference);
+if (nodes.getMinNodeVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
+// The AllocationService lists shards in a fixed order based on nodes
+// so earlier versions of this class would have a tendency to
+// select the same node across different shardIds.
+// Better overall balancing can be achieved if each shardId opts
+// for a different element in the list by also incorporating the
+// shard ID into the hash of the user-supplied preference key.
+routingHash = 31 * routingHash + indexShard.shardId.hashCode();
+}
 if (awarenessAttributes.length == 0) {
-return indexShard.activeInitializingShardsIt(Murmur3HashFunction.hash(preference));
+return indexShard.activeInitializingShardsIt(routingHash);
 } else {
-return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes, Murmur3HashFunction.hash(preference));
+return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes, routingHash);
 }
 }
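Note: a minimal illustration of the preference-hash change above, assuming only that Murmur3HashFunction and ShardId behave as referenced in the hunk; index and preference names are placeholders. It only shows that two shards of the same index now start their copy rotation at different offsets.

    String preference = "my_session_id";
    // Before: every shard uses the same starting hash for the same preference string,
    // so they tend to pick the same node.
    int oldHash = Murmur3HashFunction.hash(preference);
    // After (when all nodes are >= 6.0.0-alpha1): the shard id shifts the starting point per shard.
    int hashShard0 = 31 * Murmur3HashFunction.hash(preference) + new ShardId("my_index", "_na_", 0).hashCode();
    int hashShard1 = 31 * Murmur3HashFunction.hash(preference) + new ShardId("my_index", "_na_", 1).hashCode();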
@@ -23,6 +23,7 @@ import com.carrotsearch.hppc.ObjectIntHashMap;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.Assertions;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
@@ -1018,9 +1019,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
 * this method does nothing.
 */
 public static boolean assertShardStats(RoutingNodes routingNodes) {
-boolean run = false;
-assert (run = true); // only run if assertions are enabled!
-if (!run) {
+if (!Assertions.ENABLED) {
 return true;
 }
 int unassignedPrimaryCount = 0;
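Note: the same cleanup recurs in the MasterService, ShapeBuilder and ShardCoreKeyMap hunks below. A minimal sketch of the two idioms, assuming only that org.elasticsearch.Assertions exposes a boolean ENABLED constant as these hunks use it:

    // Old idiom: rely on the side effect of an assert statement to detect whether -ea is set.
    boolean assertionsEnabled = false;
    assert assertionsEnabled = true; // the assignment only runs when assertions are enabled
    // New idiom: read the answer once from a shared constant.
    boolean viaConstant = Assertions.ENABLED;
    assert assertionsEnabled == viaConstant;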
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.service;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
+import org.elasticsearch.Assertions;
 import org.elasticsearch.cluster.AckedClusterStateTaskListener;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
@@ -666,9 +667,7 @@ public class MasterService extends AbstractLifecycleComponent {
 assert clusterTasksResult.executionResults.size() == taskInputs.updateTasks.size()
 : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", taskInputs.updateTasks.size(),
 taskInputs.updateTasks.size() == 1 ? "" : "s", clusterTasksResult.executionResults.size());
-boolean assertsEnabled = false;
-assert (assertsEnabled = true);
-if (assertsEnabled) {
+if (Assertions.ENABLED) {
 ClusterTasksResult<Object> finalClusterTasksResult = clusterTasksResult;
 taskInputs.updateTasks.forEach(updateTask -> {
 assert finalClusterTasksResult.executionResults.containsKey(updateTask.task) :
@@ -23,6 +23,7 @@ import com.vividsolutions.jts.geom.Coordinate;
 import com.vividsolutions.jts.geom.Geometry;
 import com.vividsolutions.jts.geom.GeometryFactory;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.Assertions;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.support.ToXContentToBytes;
 import org.elasticsearch.common.io.stream.NamedWriteable;
@@ -58,9 +59,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
 static {
 // if asserts are enabled we run the debug statements even if they are not logged
 // to prevent exceptions only present if debug enabled
-boolean debug = false;
-assert debug = true;
-DEBUG = debug;
+DEBUG = Assertions.ENABLED;
 }

 public static final double DATELINE = 180;
@@ -21,6 +21,7 @@ package org.elasticsearch.common.lucene;

 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReader;
+import org.elasticsearch.Assertions;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.ShardUtils;

@@ -152,10 +153,7 @@ public final class ShardCoreKeyMap {
 }

 private synchronized boolean assertSize() {
-// this is heavy and should only used in assertions
-boolean assertionsEnabled = false;
-assert assertionsEnabled = true;
-if (assertionsEnabled == false) {
+if (!Assertions.ENABLED) {
 throw new AssertionError("only run this if assertions are enabled");
 }
 Collection<Set<IndexReader.CacheKey>> values = indexToCoreKey.values();
@@ -19,8 +19,6 @@
 package org.elasticsearch.common.settings;

 import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
-import org.elasticsearch.transport.RemoteClusterService;
-import org.elasticsearch.transport.RemoteClusterAware;
 import org.elasticsearch.action.search.TransportSearchAction;
 import org.elasticsearch.action.support.AutoCreateIndex;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -88,6 +86,8 @@ import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter;
 import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.RemoteClusterAware;
+import org.elasticsearch.transport.RemoteClusterService;
 import org.elasticsearch.transport.TcpTransport;
 import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
@@ -304,6 +304,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
 ScriptService.SCRIPT_CACHE_EXPIRE_SETTING,
 ScriptService.SCRIPT_MAX_SIZE_IN_BYTES,
 ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE,
+ScriptService.TYPES_ALLOWED_SETTING,
+ScriptService.CONTEXTS_ALLOWED_SETTING,
 IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING,
 IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
 IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
@@ -339,6 +341,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
 ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
 ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
 ZenDiscovery.MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING,
+ZenDiscovery.MAX_PENDING_CLUSTER_STATES_SETTING,
 UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
 UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
 UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_RESOLVE_TIMEOUT,
@@ -344,7 +344,7 @@ public final class Settings implements ToXContent {
             final String setting,
             final Boolean defaultValue,
             final DeprecationLogger deprecationLogger) {
-        if (indexVersion.before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (indexVersion.before(Version.V_6_0_0_alpha1)) {
             //Only emit a warning if the setting's value is not a proper boolean
             final String value = get(setting, "false");
             if (Booleans.isBoolean(value) == false) {
@@ -78,7 +78,7 @@ public final class TransportAddress implements Writeable {
      * {@link Version#V_5_0_2} as the hostString was not serialized
      */
     public TransportAddress(StreamInput in, @Nullable String hostString) throws IOException {
-        if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // bwc layer for 5.x where we had more than one transport address
+        if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // bwc layer for 5.x where we had more than one transport address
             final short i = in.readShort();
             if(i != 1) { // we fail hard to ensure nobody tries to use some custom transport address impl even if that is difficult to add
                 throw new AssertionError("illegal transport ID from node of version: " + in.getVersion() + " got: " + i + " expected: 1");
@@ -101,7 +101,7 @@ public final class TransportAddress implements Writeable {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
             out.writeShort((short)1); // this maps to InetSocketTransportAddress in 5.x
         }
         byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
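These hunks only rename the version constant, but the surrounding code shows the usual backwards-compatibility pattern: check the peer's wire version and emit a legacy field only for older peers. A minimal, self-contained sketch of that shape, using plain java.io streams and made-up version constants rather than the Elasticsearch StreamOutput API:

```java
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative only: version-gated serialization mirroring the shape of the
// TransportAddress#writeTo hunk above. Version numbers and the legacy marker
// are assumptions for the sake of the example.
final class VersionGatedWriter {

    static final int V_5_X = 5;
    static final int V_6_0 = 6;

    static void writeAddress(DataOutputStream out, int peerVersion, byte[] addressBytes) throws IOException {
        if (peerVersion < V_6_0) {
            // Older peers expect a leading marker that newer peers no longer read.
            out.writeShort(1);
        }
        out.writeInt(addressBytes.length); // 4 bytes (IPv4) or 16 bytes (IPv6)
        out.write(addressBytes);
    }
}
```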
@@ -91,19 +91,19 @@ public final class QueueResizingEsThreadPoolExecutor extends EsThreadPoolExecuto
      * Calculate task rate (λ), for a fixed number of tasks and time it took those tasks to be measured
      *
      * @param totalNumberOfTasks total number of tasks that were measured
-     * @param totalFrameFrameNanos nanoseconds during which the tasks were received
+     * @param totalFrameTaskNanos nanoseconds during which the tasks were received
      * @return the rate of tasks in the system
      */
-    static double calculateLambda(final int totalNumberOfTasks, final long totalFrameFrameNanos) {
-        assert totalFrameFrameNanos > 0 : "cannot calculate for instantaneous tasks";
-        assert totalNumberOfTasks > 0 : "cannot calculate for no tasks";
+    static double calculateLambda(final int totalNumberOfTasks, final long totalFrameTaskNanos) {
+        assert totalFrameTaskNanos > 0 : "cannot calculate for instantaneous tasks, got: " + totalFrameTaskNanos;
+        assert totalNumberOfTasks > 0 : "cannot calculate for no tasks, got: " + totalNumberOfTasks;
         // There is no set execution time, instead we adjust the time window based on the
         // number of completed tasks, so there is no background thread required to update the
         // queue size at a regular interval. This means we need to calculate our λ by the
         // total runtime, rather than a fixed interval.
 
         // λ = total tasks divided by measurement time
-        return (double) totalNumberOfTasks / totalFrameFrameNanos;
+        return (double) totalNumberOfTasks / totalFrameTaskNanos;
     }
 
     /**
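As the javadoc above says, λ is just the number of tasks divided by the measurement window in nanoseconds. A tiny worked example of the same arithmetic, standalone rather than calling the executor class:

```java
public final class LambdaExample {
    public static void main(String[] args) {
        // 100 tasks observed over a 200 ms window.
        int totalNumberOfTasks = 100;
        long totalFrameTaskNanos = 200_000_000L; // 200 ms in nanoseconds

        // λ = total tasks divided by measurement time
        double lambda = (double) totalNumberOfTasks / totalFrameTaskNanos;

        // Prints 5.0E-7 tasks per nanosecond, i.e. 500 tasks per second.
        System.out.println(lambda);
    }
}
```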
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.common.util.concurrent;
 
+import org.elasticsearch.Assertions;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.index.engine.EngineException;
 
@@ -35,9 +36,7 @@ public class ReleasableLock implements Releasable {
 
     public ReleasableLock(Lock lock) {
         this.lock = lock;
-        boolean useHoldingThreads = false;
-        assert (useHoldingThreads = true);
-        if (useHoldingThreads) {
+        if (Assertions.ENABLED) {
             holdingThreads = new ThreadLocal<>();
         } else {
             holdingThreads = null;
@@ -395,6 +395,7 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         FLOAT(VALUE_NUMBER, VALUE_STRING),
         FLOAT_OR_NULL(VALUE_NUMBER, VALUE_STRING, VALUE_NULL),
         DOUBLE(VALUE_NUMBER, VALUE_STRING),
+        DOUBLE_OR_NULL(VALUE_NUMBER, VALUE_STRING, VALUE_NULL),
         LONG(VALUE_NUMBER, VALUE_STRING),
         LONG_OR_NULL(VALUE_NUMBER, VALUE_STRING, VALUE_NULL),
         INT(VALUE_NUMBER, VALUE_STRING),
@@ -411,7 +412,8 @@ public final class ObjectParser<Value, Context> extends AbstractObjectParser<Val
         OBJECT_OR_STRING(START_OBJECT, VALUE_STRING),
         OBJECT_ARRAY_BOOLEAN_OR_STRING(START_OBJECT, START_ARRAY, VALUE_BOOLEAN, VALUE_STRING),
         OBJECT_ARRAY_OR_STRING(START_OBJECT, START_ARRAY, VALUE_STRING),
-        VALUE(VALUE_BOOLEAN, VALUE_NULL, VALUE_EMBEDDED_OBJECT, VALUE_NUMBER, VALUE_STRING);
+        VALUE(VALUE_BOOLEAN, VALUE_NULL, VALUE_EMBEDDED_OBJECT, VALUE_NUMBER, VALUE_STRING),
+        VALUE_OBJECT_ARRAY(VALUE_BOOLEAN, VALUE_NULL, VALUE_EMBEDDED_OBJECT, VALUE_NUMBER, VALUE_STRING, START_OBJECT, START_ARRAY);
 
         private final EnumSet<XContentParser.Token> tokens;
 
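Each `ValueType` constant above is simply a whitelist of parser tokens held in an `EnumSet`, as the `tokens` field in the context lines suggests. A minimal standalone sketch of that construct, with a made-up `Token` enum rather than the XContent one:

```java
import java.util.EnumSet;

// Illustrative sketch of the ValueType pattern above: each constant carries
// the set of tokens it accepts. The Token enum here is invented for the example.
public class ValueTypeSketch {

    enum Token { VALUE_NUMBER, VALUE_STRING, VALUE_NULL, START_OBJECT, START_ARRAY }

    enum ValueType {
        DOUBLE(Token.VALUE_NUMBER, Token.VALUE_STRING),
        DOUBLE_OR_NULL(Token.VALUE_NUMBER, Token.VALUE_STRING, Token.VALUE_NULL);

        private final EnumSet<Token> tokens;

        ValueType(Token first, Token... rest) {
            this.tokens = EnumSet.of(first, rest);
        }

        boolean supports(Token token) {
            return tokens.contains(token);
        }
    }

    public static void main(String[] args) {
        System.out.println(ValueType.DOUBLE.supports(Token.VALUE_NULL));         // false
        System.out.println(ValueType.DOUBLE_OR_NULL.supports(Token.VALUE_NULL)); // true
    }
}
```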
@@ -111,10 +111,9 @@ public final class XContentParserUtils {
     }
 
     /**
-     * This method expects that the current token is a {@code XContentParser.Token.FIELD_NAME} and
-     * that the current field name is the concatenation of a type, delimiter and name (ex: terms#foo
-     * where "terms" refers to the type of a registered {@link NamedXContentRegistry.Entry}, "#" is
-     * the delimiter and "foo" the name of the object to parse).
+     * This method expects that the current field name is the concatenation of a type, a delimiter and a name
+     * (ex: terms#foo where "terms" refers to the type of a registered {@link NamedXContentRegistry.Entry},
+     * "#" is the delimiter and "foo" the name of the object to parse).
      *
      * The method splits the field's name to extract the type and name and then parses the object
      * using the {@link XContentParser#namedObject(Class, String, Object)} method.
@@ -128,7 +127,6 @@ public final class XContentParserUtils {
      * from the field's name
      */
     public static <T> T parseTypedKeysObject(XContentParser parser, String delimiter, Class<T> objectClass) throws IOException {
-        ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
         String currentFieldName = parser.currentName();
         if (Strings.hasLength(currentFieldName)) {
             int position = currentFieldName.indexOf(delimiter);
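The javadoc above describes "typed keys" field names such as `terms#foo`: everything before the delimiter names the registered type, everything after it is the object's name. A standalone sketch of that split using plain string handling, not the XContent parser:

```java
public final class TypedKeysExample {

    // Splits a typed-keys field name like "terms#foo" into [type, name].
    static String[] splitTypedKey(String fieldName, String delimiter) {
        int position = fieldName.indexOf(delimiter);
        if (position == -1) {
            throw new IllegalArgumentException(
                "expected field name with delimiter [" + delimiter + "], got [" + fieldName + "]");
        }
        String type = fieldName.substring(0, position);
        String name = fieldName.substring(position + delimiter.length());
        return new String[] { type, name };
    }

    public static void main(String[] args) {
        String[] parts = splitTypedKey("terms#foo", "#");
        System.out.println(parts[0]); // terms -> used to look up the registered parser
        System.out.println(parts[1]); // foo   -> the name of the object being parsed
    }
}
```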
@@ -23,8 +23,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.IncompatibleClusterStateVersionException;
@@ -60,61 +60,53 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.function.Supplier;
 
 public class PublishClusterStateAction extends AbstractComponent {
 
     public static final String SEND_ACTION_NAME = "internal:discovery/zen/publish/send";
     public static final String COMMIT_ACTION_NAME = "internal:discovery/zen/publish/commit";
 
-    public static final String SETTINGS_MAX_PENDING_CLUSTER_STATES = "discovery.zen.publish.max_pending_cluster_states";
+    public interface IncomingClusterStateListener {
 
-    public interface NewPendingClusterStateListener {
+        /**
+         * called when a new incoming cluster state has been received.
+         * Should validate the incoming state and throw an exception if it's not a valid successor state.
+         */
+        void onIncomingClusterState(ClusterState incomingState);
 
-        /** a new cluster state has been committed and is ready to process via {@link #pendingStatesQueue()} */
-        void onNewClusterState(String reason);
+        /**
+         * called when a cluster state has been committed and is ready to be processed
+         */
+        void onClusterStateCommitted(String stateUUID, ActionListener<Void> processedListener);
     }
 
     private final TransportService transportService;
     private final NamedWriteableRegistry namedWriteableRegistry;
-    private final Supplier<ClusterState> clusterStateSupplier;
-    private final NewPendingClusterStateListener newPendingClusterStatelistener;
+    private final IncomingClusterStateListener incomingClusterStateListener;
     private final DiscoverySettings discoverySettings;
-    private final ClusterName clusterName;
-    private final PendingClusterStatesQueue pendingStatesQueue;
 
     public PublishClusterStateAction(
             Settings settings,
             TransportService transportService,
             NamedWriteableRegistry namedWriteableRegistry,
-            Supplier<ClusterState> clusterStateSupplier,
-            NewPendingClusterStateListener listener,
-            DiscoverySettings discoverySettings,
-            ClusterName clusterName) {
+            IncomingClusterStateListener incomingClusterStateListener,
+            DiscoverySettings discoverySettings) {
         super(settings);
         this.transportService = transportService;
         this.namedWriteableRegistry = namedWriteableRegistry;
-        this.clusterStateSupplier = clusterStateSupplier;
-        this.newPendingClusterStatelistener = listener;
+        this.incomingClusterStateListener = incomingClusterStateListener;
        this.discoverySettings = discoverySettings;
-        this.clusterName = clusterName;
-        this.pendingStatesQueue = new PendingClusterStatesQueue(logger, settings.getAsInt(SETTINGS_MAX_PENDING_CLUSTER_STATES, 25));
         transportService.registerRequestHandler(SEND_ACTION_NAME, BytesTransportRequest::new, ThreadPool.Names.SAME, false, false,
             new SendClusterStateRequestHandler());
         transportService.registerRequestHandler(COMMIT_ACTION_NAME, CommitClusterStateRequest::new, ThreadPool.Names.SAME, false, false,
             new CommitClusterStateRequestHandler());
     }
 
-    public PendingClusterStatesQueue pendingStatesQueue() {
-        return pendingStatesQueue;
-    }
-
     /**
      * publishes a cluster change event to other nodes. if at least minMasterNodes acknowledge the change it is committed and will
      * be processed by the master and the other nodes.
@@ -387,7 +379,7 @@ public class PublishClusterStateAction extends AbstractComponent {
                 final ClusterState incomingState;
                 // If true we received full cluster state - otherwise diffs
                 if (in.readBoolean()) {
-                    incomingState = ClusterState.readFrom(in, clusterStateSupplier.get().nodes().getLocalNode());
+                    incomingState = ClusterState.readFrom(in, transportService.getLocalNode());
                     logger.debug("received full cluster state version [{}] with size [{}]", incomingState.version(),
                         request.bytes().length());
                 } else if (lastSeenClusterState != null) {
@@ -399,10 +391,7 @@ public class PublishClusterStateAction extends AbstractComponent {
                     logger.debug("received diff for but don't have any local cluster state - requesting full state");
                     throw new IncompatibleClusterStateVersionException("have no local cluster state");
                 }
-                // sanity check incoming state
-                validateIncomingState(incomingState, lastSeenClusterState);
-
-                pendingStatesQueue.addPending(incomingState);
+                incomingClusterStateListener.onIncomingClusterState(incomingState);
                 lastSeenClusterState = incomingState;
             }
         } finally {
@@ -411,56 +400,22 @@ public class PublishClusterStateAction extends AbstractComponent {
                 channel.sendResponse(TransportResponse.Empty.INSTANCE);
             }
 
-    // package private for testing
-
-    /**
-     * does simple sanity check of the incoming cluster state. Throws an exception on rejections.
-     */
-    void validateIncomingState(ClusterState incomingState, ClusterState lastSeenClusterState) {
-        final ClusterName incomingClusterName = incomingState.getClusterName();
-        if (!incomingClusterName.equals(this.clusterName)) {
-            logger.warn("received cluster state from [{}] which is also master but with a different cluster name [{}]",
-                incomingState.nodes().getMasterNode(), incomingClusterName);
-            throw new IllegalStateException("received state from a node that is not part of the cluster");
-        }
-        final ClusterState clusterState = clusterStateSupplier.get();
-
-        if (clusterState.nodes().getLocalNode().equals(incomingState.nodes().getLocalNode()) == false) {
-            logger.warn("received a cluster state from [{}] and not part of the cluster, should not happen",
-                incomingState.nodes().getMasterNode());
-            throw new IllegalStateException("received state with a local node that does not match the current local node");
-        }
-
-        if (ZenDiscovery.shouldIgnoreOrRejectNewClusterState(logger, clusterState, incomingState)) {
-            String message = String.format(
-                Locale.ROOT,
-                "rejecting cluster state version [%d] uuid [%s] received from [%s]",
-                incomingState.version(),
-                incomingState.stateUUID(),
-                incomingState.nodes().getMasterNodeId()
-            );
-            logger.warn(message);
-            throw new IllegalStateException(message);
-        }
-
-    }
-
     protected void handleCommitRequest(CommitClusterStateRequest request, final TransportChannel channel) {
-        final ClusterState state = pendingStatesQueue.markAsCommitted(request.stateUUID,
-            new PendingClusterStatesQueue.StateProcessedListener() {
+        incomingClusterStateListener.onClusterStateCommitted(request.stateUUID, new ActionListener<Void>() {
                 @Override
-                public void onNewClusterStateProcessed() {
+                public void onResponse(Void ignore) {
                     try {
                         // send a response to the master to indicate that this cluster state has been processed post committing it.
                         channel.sendResponse(TransportResponse.Empty.INSTANCE);
                     } catch (Exception e) {
                         logger.debug("failed to send response on cluster state processed", e);
-                        onNewClusterStateFailed(e);
+                        onFailure(e);
                     }
                 }
 
                 @Override
-                public void onNewClusterStateFailed(Exception e) {
+                public void onFailure(Exception e) {
                     try {
                         channel.sendResponse(e);
                     } catch (Exception inner) {
@@ -469,10 +424,6 @@ public class PublishClusterStateAction extends AbstractComponent {
                     }
                 }
             });
-        if (state != null) {
-            newPendingClusterStatelistener.onNewClusterState("master " + state.nodes().getMasterNode() +
-                " committed version [" + state.version() + "]");
-        }
     }
 
     private class SendClusterStateRequestHandler implements TransportRequestHandler<BytesTransportRequest> {
@@ -25,6 +25,7 @@ import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -56,6 +57,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.discovery.DiscoveryStats;
+import org.elasticsearch.discovery.zen.PublishClusterStateAction.IncomingClusterStateListener;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.EmptyTransportResponseHandler;
 import org.elasticsearch.transport.TransportChannel;
@@ -82,7 +84,7 @@ import java.util.stream.Collectors;
 import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
 import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
 
-public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider {
+public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider, IncomingClusterStateListener {
 
     public static final Setting<TimeValue> PING_TIMEOUT_SETTING =
         Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), Property.NodeScope);
@@ -104,6 +106,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
             Property.NodeScope);
     public static final Setting<Boolean> MASTER_ELECTION_IGNORE_NON_MASTER_PINGS_SETTING =
         Setting.boolSetting("discovery.zen.master_election.ignore_non_master_pings", false, Property.NodeScope);
+    public static final Setting<Integer> MAX_PENDING_CLUSTER_STATES_SETTING =
+        Setting.intSetting("discovery.zen.publish.max_pending_cluster_states", 25, 1, Property.NodeScope);
 
     public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin";
 
@@ -139,6 +143,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
 
     private final JoinThreadControl joinThreadControl;
 
+    private final PendingClusterStatesQueue pendingStatesQueue;
+
     private final NodeJoinController nodeJoinController;
     private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;
 
@@ -197,16 +203,15 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
         this.masterFD.addListener(new MasterNodeFailureListener());
         this.nodesFD = new NodesFaultDetection(settings, threadPool, transportService, clusterName);
         this.nodesFD.addListener(new NodeFaultDetectionListener());
+        this.pendingStatesQueue = new PendingClusterStatesQueue(logger, MAX_PENDING_CLUSTER_STATES_SETTING.get(settings));
 
         this.publishClusterState =
             new PublishClusterStateAction(
                 settings,
                 transportService,
                 namedWriteableRegistry,
-                this::clusterState,
-                new NewPendingClusterStateListener(),
-                discoverySettings,
-                clusterName);
+                this,
+                discoverySettings);
         this.membership = new MembershipAction(settings, transportService, new MembershipListener());
         this.joinThreadControl = new JoinThreadControl();
 
@@ -311,7 +316,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
             throw new FailedToCommitClusterStateException("state was mutated while calculating new CS update");
         }
 
-        publishClusterState.pendingStatesQueue().addPending(newState);
+        pendingStatesQueue.addPending(newState);
 
         try {
             publishClusterState.publish(clusterChangedEvent, electMaster.minimumMasterNodes(), ackListener);
@@ -321,7 +326,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
                 newState.version(), electMaster.minimumMasterNodes());
 
             synchronized (stateMutex) {
-                publishClusterState.pendingStatesQueue().failAllStatesAndClear(
+                pendingStatesQueue.failAllStatesAndClear(
                     new ElasticsearchException("failed to publish cluster state"));
 
                 rejoin("zen-disco-failed-to-publish");
@@ -332,7 +337,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
         final DiscoveryNode localNode = newState.getNodes().getLocalNode();
         final CountDownLatch latch = new CountDownLatch(1);
         final AtomicBoolean processedOrFailed = new AtomicBoolean();
-        publishClusterState.pendingStatesQueue().markAsCommitted(newState.stateUUID(),
+        pendingStatesQueue.markAsCommitted(newState.stateUUID(),
             new PendingClusterStatesQueue.StateProcessedListener() {
                 @Override
                 public void onNewClusterStateProcessed() {
@@ -391,7 +396,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
 
     @Override
     public DiscoveryStats stats() {
-        PendingClusterStateStats queueStats = publishClusterState.pendingStatesQueue().stats();
+        PendingClusterStateStats queueStats = pendingStatesQueue.stats();
         return new DiscoveryStats(queueStats);
     }
 
@@ -409,11 +414,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
 
     // used for testing
     public ClusterState[] pendingClusterStates() {
-        return publishClusterState.pendingStatesQueue().pendingClusterStates();
+        return pendingStatesQueue.pendingClusterStates();
     }
 
     PendingClusterStatesQueue pendingClusterStatesQueue() {
-        return publishClusterState.pendingStatesQueue();
+        return pendingStatesQueue;
     }
 
     /**
@@ -703,7 +708,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
         synchronized (stateMutex) {
             if (localNodeMaster() == false && masterNode.equals(committedState.get().nodes().getMasterNode())) {
                 // flush any pending cluster states from old master, so it will not be set as master again
-                publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("master left [{}]", reason));
+                pendingStatesQueue.failAllStatesAndClear(new ElasticsearchException("master left [{}]", reason));
                 rejoin("master left (reason = " + reason + ")");
             }
         }
@@ -713,7 +718,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
     boolean processNextCommittedClusterState(String reason) {
         assert Thread.holdsLock(stateMutex);
 
-        final ClusterState newClusterState = publishClusterState.pendingStatesQueue().getNextClusterStateToProcess();
+        final ClusterState newClusterState = pendingStatesQueue.getNextClusterStateToProcess();
         final ClusterState currentState = committedState.get();
         final ClusterState adaptedNewClusterState;
         // all pending states have been processed
@@ -742,7 +747,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
             }
         } catch (Exception e) {
             try {
-                publishClusterState.pendingStatesQueue().markAsFailed(newClusterState, e);
+                pendingStatesQueue.markAsFailed(newClusterState, e);
             } catch (Exception inner) {
                 inner.addSuppressed(e);
                 logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner);
@@ -811,7 +816,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
             @Override
             public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                 try {
-                    publishClusterState.pendingStatesQueue().markAsProcessed(newClusterState);
+                    pendingStatesQueue.markAsProcessed(newClusterState);
                 } catch (Exception e) {
                     onFailure(source, e);
                 }
@@ -823,7 +828,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
                 try {
                     // TODO: use cluster state uuid instead of full cluster state so that we don't keep reference to CS around
                     // for too long.
-                    publishClusterState.pendingStatesQueue().markAsFailed(newClusterState, e);
+                    pendingStatesQueue.markAsFailed(newClusterState, e);
                 } catch (Exception inner) {
                     inner.addSuppressed(e);
                     logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner);
@@ -1066,16 +1071,64 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
         }
     }
 
-    private class NewPendingClusterStateListener implements PublishClusterStateAction.NewPendingClusterStateListener {
+    @Override
+    public void onIncomingClusterState(ClusterState incomingState) {
+        validateIncomingState(logger, incomingState, committedState.get());
+        pendingStatesQueue.addPending(incomingState);
+    }
 
-        @Override
-        public void onNewClusterState(String reason) {
-            synchronized (stateMutex) {
-                processNextCommittedClusterState(reason);
+    @Override
+    public void onClusterStateCommitted(String stateUUID, ActionListener<Void> processedListener) {
+        final ClusterState state = pendingStatesQueue.markAsCommitted(stateUUID,
+            new PendingClusterStatesQueue.StateProcessedListener() {
+                @Override
+                public void onNewClusterStateProcessed() {
+                    processedListener.onResponse(null);
+                }
+
+                @Override
+                public void onNewClusterStateFailed(Exception e) {
+                    processedListener.onFailure(e);
+                }
+            });
+        if (state != null) {
+            synchronized (stateMutex) {
+                processNextCommittedClusterState("master " + state.nodes().getMasterNode() +
+                    " committed version [" + state.version() + "]");
             }
         }
     }
 
+    /**
+     * does simple sanity check of the incoming cluster state. Throws an exception on rejections.
+     */
+    static void validateIncomingState(Logger logger, ClusterState incomingState, ClusterState lastState) {
+        final ClusterName incomingClusterName = incomingState.getClusterName();
+        if (!incomingClusterName.equals(lastState.getClusterName())) {
+            logger.warn("received cluster state from [{}] which is also master but with a different cluster name [{}]",
+                incomingState.nodes().getMasterNode(), incomingClusterName);
+            throw new IllegalStateException("received state from a node that is not part of the cluster");
+        }
+        if (lastState.nodes().getLocalNode().equals(incomingState.nodes().getLocalNode()) == false) {
+            logger.warn("received a cluster state from [{}] and not part of the cluster, should not happen",
+                incomingState.nodes().getMasterNode());
+            throw new IllegalStateException("received state with a local node that does not match the current local node");
+        }
+
+        if (shouldIgnoreOrRejectNewClusterState(logger, lastState, incomingState)) {
+            String message = String.format(
+                Locale.ROOT,
+                "rejecting cluster state version [%d] uuid [%s] received from [%s]",
+                incomingState.version(),
+                incomingState.stateUUID(),
+                incomingState.nodes().getMasterNodeId()
+            );
+            logger.warn(message);
+            throw new IllegalStateException(message);
+        }
+
+    }
+
     private class MembershipListener implements MembershipAction.MembershipListener {
         @Override
         public void onJoin(DiscoveryNode node, MembershipAction.JoinCallback callback) {
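The new `onClusterStateCommitted` implementation above adapts the queue's `StateProcessedListener` callbacks to an `ActionListener`-style `onResponse`/`onFailure` pair. A minimal, self-contained sketch of that adapter shape, where both interfaces are simplified stand-ins rather than the Elasticsearch types:

```java
// Illustrative adapter between two callback styles, mirroring the
// onClusterStateCommitted change above. Both interfaces are stand-ins.
public final class ListenerAdapterSketch {

    interface StateProcessedListener {
        void onNewClusterStateProcessed();
        void onNewClusterStateFailed(Exception e);
    }

    interface ActionListener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    // Wraps an ActionListener so code expecting a StateProcessedListener can drive it.
    static StateProcessedListener adapt(ActionListener<Void> processedListener) {
        return new StateProcessedListener() {
            @Override
            public void onNewClusterStateProcessed() {
                processedListener.onResponse(null);
            }

            @Override
            public void onNewClusterStateFailed(Exception e) {
                processedListener.onFailure(e);
            }
        };
    }

    public static void main(String[] args) {
        StateProcessedListener listener = adapt(new ActionListener<Void>() {
            @Override
            public void onResponse(Void ignore) {
                System.out.println("processed");
            }

            @Override
            public void onFailure(Exception e) {
                System.out.println("failed: " + e.getMessage());
            }
        });
        listener.onNewClusterStateProcessed(); // prints "processed"
    }
}
```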
@@ -290,7 +290,7 @@ public class TransportNodesListGatewayStartedShards extends
         @Override
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
-            if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
                 // legacy version
                 in.readLong();
             }
@@ -304,7 +304,7 @@ public class TransportNodesListGatewayStartedShards extends
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
-            if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+            if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
                 // legacy version
                 out.writeLong(-1L);
             }
@@ -1,91 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index;
-
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException;
-
-import java.io.IOException;
-
-public class AlreadyExpiredException extends ElasticsearchException implements IgnoreOnRecoveryEngineException {
-    private String index;
-    private String type;
-    private String id;
-    private final long timestamp;
-    private final long ttl;
-    private final long now;
-
-    public AlreadyExpiredException(String index, String type, String id, long timestamp, long ttl, long now) {
-        super("already expired [" + index + "]/[" + type + "]/[" + id + "] due to expire at [" + (timestamp + ttl) + "] and was processed at [" + now + "]");
-        this.setIndex(index);
-        this.type = type;
-        this.id = id;
-        this.timestamp = timestamp;
-        this.ttl = ttl;
-        this.now = now;
-    }
-
-    public String index() {
-        return index;
-    }
-
-    public String type() {
-        return type;
-    }
-
-    public String id() {
-        return id;
-    }
-
-    public long timestamp() {
-        return timestamp;
-    }
-
-    public long ttl() {
-        return ttl;
-    }
-
-    public long now() {
-        return now;
-    }
-
-    public AlreadyExpiredException(StreamInput in) throws IOException{
-        super(in);
-        index = in.readOptionalString();
-        type = in.readOptionalString();
-        id = in.readOptionalString();
-        timestamp = in.readLong();
-        ttl = in.readLong();
-        now = in.readLong();
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-        out.writeOptionalString(index);
-        out.writeOptionalString(type);
-        out.writeOptionalString(id);
-        out.writeLong(timestamp);
-        out.writeLong(ttl);
-        out.writeLong(now);
-    }
-}
@@ -612,11 +612,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
                 rescheduleFsyncTask(durability);
             }
         }
-
-        // update primary terms
-        for (final IndexShard shard : this.shards.values()) {
-            shard.updatePrimaryTerm(metadata.primaryTerm(shard.shardId().id()));
-        }
     }
 
     private void rescheduleFsyncTask(Translog.Durability durability) {
@@ -120,13 +120,13 @@ public final class IndexSortConfig {
             .map((name) -> new FieldSortSpec(name))
             .toArray(FieldSortSpec[]::new);
 
-        if (sortSpecs.length > 0 && indexSettings.getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
+        if (sortSpecs.length > 0 && indexSettings.getIndexVersionCreated().before(Version.V_6_0_0_alpha1)) {
             /**
              * This index might be assigned to a node where the index sorting feature is not available
              * (ie. versions prior to {@link Version.V_6_0_0_alpha1_UNRELEASED}) so we must fail here rather than later.
             */
            throw new IllegalArgumentException("unsupported index.version.created:" + indexSettings.getIndexVersionCreated() +
-                ", can't set index.sort on versions prior to " + Version.V_6_0_0_alpha1_UNRELEASED);
+                ", can't set index.sort on versions prior to " + Version.V_6_0_0_alpha1);
         }
 
         if (INDEX_SORT_ORDER_SETTING.exists(settings)) {
@@ -74,14 +74,15 @@ public final class AnalysisRegistry implements Closeable {
                             Map<String, AnalysisProvider<TokenizerFactory>> tokenizers,
                             Map<String, AnalysisProvider<AnalyzerProvider<?>>> analyzers,
                             Map<String, AnalysisProvider<AnalyzerProvider<?>>> normalizers,
-                            Map<String, PreConfiguredTokenFilter> preConfiguredTokenFilters) {
+                            Map<String, PreConfiguredTokenFilter> preConfiguredTokenFilters,
+                            Map<String, PreConfiguredTokenizer> preConfiguredTokenizers) {
         this.environment = environment;
         this.charFilters = unmodifiableMap(charFilters);
         this.tokenFilters = unmodifiableMap(tokenFilters);
         this.tokenizers = unmodifiableMap(tokenizers);
         this.analyzers = unmodifiableMap(analyzers);
         this.normalizers = unmodifiableMap(normalizers);
-        prebuiltAnalysis = new PrebuiltAnalysis(preConfiguredTokenFilters);
+        prebuiltAnalysis = new PrebuiltAnalysis(preConfiguredTokenFilters, preConfiguredTokenizers);
     }
 
     /**
@@ -169,12 +170,12 @@ public final class AnalysisRegistry implements Closeable {
         */
        tokenFilters.put("synonym", requiresAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings)));
        tokenFilters.put("synonym_graph", requiresAnalysisSettings((is, env, name, settings) -> new SynonymGraphTokenFilterFactory(is, env, this, name, settings)));
-        return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories);
+        return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters);
     }
 
     public Map<String, TokenizerFactory> buildTokenizerFactories(IndexSettings indexSettings) throws IOException {
         final Map<String, Settings> tokenizersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_TOKENIZER);
-        return buildMapping(Component.TOKENIZER, indexSettings, tokenizersSettings, tokenizers, prebuiltAnalysis.tokenizerFactories);
+        return buildMapping(Component.TOKENIZER, indexSettings, tokenizersSettings, tokenizers, prebuiltAnalysis.preConfiguredTokenizers);
     }
 
     public Map<String, CharFilterFactory> buildCharFilterFactories(IndexSettings indexSettings) throws IOException {
@@ -394,31 +395,22 @@ public final class AnalysisRegistry implements Closeable {
     private static class PrebuiltAnalysis implements Closeable {
 
         final Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider<?>>> analyzerProviderFactories;
-        final Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizerFactories;
-        final Map<String, ? extends AnalysisProvider<TokenFilterFactory>> tokenFilterFactories;
+        final Map<String, ? extends AnalysisProvider<TokenFilterFactory>> preConfiguredTokenFilters;
+        final Map<String, ? extends AnalysisProvider<TokenizerFactory>> preConfiguredTokenizers;
         final Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilterFactories;
 
-        private PrebuiltAnalysis(Map<String, PreConfiguredTokenFilter> preConfiguredTokenFilters) {
+        private PrebuiltAnalysis(
+                Map<String, PreConfiguredTokenFilter> preConfiguredTokenFilters,
+                Map<String, PreConfiguredTokenizer> preConfiguredTokenizers) {
             Map<String, PreBuiltAnalyzerProviderFactory> analyzerProviderFactories = new HashMap<>();
-            Map<String, PreBuiltTokenizerFactoryFactory> tokenizerFactories = new HashMap<>();
             Map<String, PreBuiltCharFilterFactoryFactory> charFilterFactories = new HashMap<>();
 
             // Analyzers
             for (PreBuiltAnalyzers preBuiltAnalyzerEnum : PreBuiltAnalyzers.values()) {
                 String name = preBuiltAnalyzerEnum.name().toLowerCase(Locale.ROOT);
                 analyzerProviderFactories.put(name, new PreBuiltAnalyzerProviderFactory(name, AnalyzerScope.INDICES, preBuiltAnalyzerEnum.getAnalyzer(Version.CURRENT)));
             }
 
-            // Tokenizers
-            for (PreBuiltTokenizers preBuiltTokenizer : PreBuiltTokenizers.values()) {
-                String name = preBuiltTokenizer.name().toLowerCase(Locale.ROOT);
-                tokenizerFactories.put(name, new PreBuiltTokenizerFactoryFactory(preBuiltTokenizer.getTokenizerFactory(Version.CURRENT)));
-            }
-
-            // Tokenizer aliases
-            tokenizerFactories.put("nGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.NGRAM.getTokenizerFactory(Version.CURRENT)));
-            tokenizerFactories.put("edgeNGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.EDGE_NGRAM.getTokenizerFactory(Version.CURRENT)));
-            tokenizerFactories.put("PathHierarchy", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.PATH_HIERARCHY.getTokenizerFactory(Version.CURRENT)));
-
             // Char Filters
             for (PreBuiltCharFilters preBuiltCharFilter : PreBuiltCharFilters.values()) {
                 String name = preBuiltCharFilter.name().toLowerCase(Locale.ROOT);
@@ -429,8 +421,8 @@ public final class AnalysisRegistry implements Closeable {
 
             this.analyzerProviderFactories = Collections.unmodifiableMap(analyzerProviderFactories);
             this.charFilterFactories = Collections.unmodifiableMap(charFilterFactories);
-            this.tokenizerFactories = Collections.unmodifiableMap(tokenizerFactories);
-            tokenFilterFactories = preConfiguredTokenFilters;
+            this.preConfiguredTokenFilters = preConfiguredTokenFilters;
+            this.preConfiguredTokenizers = preConfiguredTokenizers;
         }
 
         public AnalysisModule.AnalysisProvider<CharFilterFactory> getCharFilterFactory(String name) {
@@ -438,11 +430,11 @@ public final class AnalysisRegistry implements Closeable {
         }
 
         public AnalysisModule.AnalysisProvider<TokenFilterFactory> getTokenFilterFactory(String name) {
-            return tokenFilterFactories.get(name);
+            return preConfiguredTokenFilters.get(name);
         }
 
         public AnalysisModule.AnalysisProvider<TokenizerFactory> getTokenizerFactory(String name) {
-            return tokenizerFactories.get(name);
+            return preConfiguredTokenizers.get(name);
         }
 
         public AnalysisModule.AnalysisProvider<AnalyzerProvider<?>> getAnalyzerProvider(String name) {
Some files were not shown because too many files have changed in this diff.