Merge remote-tracking branch 'es/master' into feature/ingest

This commit is contained in:
Martijn van Groningen 2015-11-11 14:14:44 +07:00
commit da87cbf2b4
319 changed files with 11115 additions and 1472 deletions

View File

@ -44,7 +44,7 @@ In order to run Elasticsearch from source without building a package, you can
run it using Maven:
-------------------------------------
./run.sh
gradle run
-------------------------------------
=== Test case filtering.
@ -455,5 +455,5 @@ mvn -Dtests.coverage verify jacoco:report
== Debugging from an IDE
If you want to run elasticsearch from your IDE, you should execute ./run.sh
If you want to run elasticsearch from your IDE, you should execute gradle run
It opens a remote debugging port that you can connect with your IDE.

View File

@ -31,6 +31,9 @@ buildscript {
// common maven publishing configuration
subprojects {
group = 'org.elasticsearch'
version = org.elasticsearch.gradle.VersionProperties.elasticsearch
plugins.withType(NexusPlugin).whenPluginAdded {
modifyPom {
project {
@ -91,34 +94,12 @@ allprojects {
sourceCompatibility = JavaVersion.VERSION_1_8
targetCompatibility = sourceCompatibility
// dependency versions that are used in more than one place
versions = [
lucene: "${luceneVersion}",
randomizedrunner: '2.2.0',
httpclient: '4.3.6'
]
// for eclipse hacks...
isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse')
}
}
subprojects {
repositories {
mavenCentral()
maven {
name 'sonatype-snapshots'
url 'http://oss.sonatype.org/content/repositories/snapshots/'
}
if (versions.lucene.contains('-snapshot')) {
String revision = (luceneVersion =~ /\d\.\d\.\d-snapshot-(\d+)/)[0][1]
maven {
name 'lucene-snapshots'
url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}"
}
}
}
// include license and notice in jars
gradle.projectsEvaluated {
tasks.withType(Jar) {
@ -133,7 +114,7 @@ subprojects {
configurations {
all {
resolutionStrategy {
//failOnVersionConflict()
failOnVersionConflict()
dependencySubstitution {
substitute module("org.elasticsearch:rest-api-spec:${version}") with project("${projectsPrefix}:rest-api-spec")
@ -199,3 +180,5 @@ task buildSrcEclipse(type: GradleBuild) {
}
tasks.eclipse.dependsOn(buildSrcEclipse)
task run(dependsOn: ':distribution:run')

View File

@ -1,4 +1,21 @@
import org.apache.tools.ant.filters.ReplaceTokens
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
plugins {
id 'groovy'
@ -8,16 +25,13 @@ plugins {
apply plugin: 'idea'
apply plugin: 'eclipse'
/*idea {
project {
languageLevel = '1.8'
vcs = 'Git'
}
}*/
group = 'org.elasticsearch.gradle'
archivesBaseName = 'build-tools'
Properties props = new Properties()
props.load(project.file('version.properties').newDataInputStream())
version = props.getProperty('elasticsearch')
repositories {
mavenCentral()
maven {
@ -30,8 +44,8 @@ repositories {
dependencies {
compile gradleApi()
compile localGroovy()
compile 'com.carrotsearch.randomizedtesting:junit4-ant:2.2.0'
compile('junit:junit:4.11') {
compile "com.carrotsearch.randomizedtesting:junit4-ant:${props.getProperty('randomizedrunner')}"
compile("junit:junit:${props.getProperty('junit')}") {
transitive = false
}
compile 'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3'
@ -41,16 +55,9 @@ dependencies {
compile 'de.thetaphi:forbiddenapis:2.0'
}
Properties props = new Properties()
props.load(project.file('../gradle.properties').newDataInputStream())
version = props.getProperty('version')
processResources {
inputs.file('../gradle.properties')
filter ReplaceTokens, tokens: [
'version': props.getProperty('version'),
'luceneVersion': props.getProperty('luceneVersion')
]
inputs.file('version.properties')
from 'version.properties'
}
extraArchive {

View File

@ -19,14 +19,16 @@
package org.elasticsearch.gradle
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.artifacts.ProjectDependency
import org.gradle.api.GradleException
import org.gradle.api.JavaVersion
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.JavaCompile
import org.gradle.util.VersionNumber
import org.gradle.util.GradleVersion
/**
* Encapsulates build configuration for elasticsearch projects.
@ -36,6 +38,7 @@ class BuildPlugin implements Plugin<Project> {
@Override
void apply(Project project) {
globalBuildInfo(project)
configureRepositories(project)
project.pluginManager.apply('java')
project.pluginManager.apply('carrotsearch.randomized-testing')
// these plugins add lots of info to our jars
@ -45,6 +48,8 @@ class BuildPlugin implements Plugin<Project> {
project.pluginManager.apply('nebula.info-scm')
project.pluginManager.apply('nebula.info-jar')
configureConfigurations(project)
project.ext.versions = VersionProperties.versions
configureCompile(project)
configureJarManifest(project)
configureTest(project)
@ -53,17 +58,6 @@ class BuildPlugin implements Plugin<Project> {
static void globalBuildInfo(Project project) {
if (project.rootProject.ext.has('buildChecksDone') == false) {
// enforce gradle version
VersionNumber gradleVersion = VersionNumber.parse(project.gradle.gradleVersion)
if (gradleVersion.major < 2 || gradleVersion.major == 2 && gradleVersion.minor < 6) {
throw new GradleException('Gradle 2.6 or above is required to build elasticsearch')
}
// enforce Java version
if (!JavaVersion.current().isJava8Compatible()) {
throw new GradleException('Java 8 or above is required to build Elasticsearch')
}
// Build debugging info
println '======================================='
println 'Elasticsearch Build Hamster says Hello!'
@ -71,10 +65,57 @@ class BuildPlugin implements Plugin<Project> {
println " Gradle Version : ${project.gradle.gradleVersion}"
println " JDK Version : ${System.getProperty('java.runtime.version')} (${System.getProperty('java.vendor')})"
println " OS Info : ${System.getProperty('os.name')} ${System.getProperty('os.version')} (${System.getProperty('os.arch')})"
// enforce gradle version
GradleVersion minGradle = GradleVersion.version('2.8')
if (GradleVersion.current() < minGradle) {
throw new GradleException("${minGradle} or above is required to build elasticsearch")
}
// enforce Java version
if (!JavaVersion.current().isJava8Compatible()) {
throw new GradleException('Java 8 or above is required to build Elasticsearch')
}
project.rootProject.ext.buildChecksDone = true
}
}
/** Makes dependencies non-transitive by default */
static void configureConfigurations(Project project) {
// force all dependencies added directly to compile/testCompile to be non-transitive, except for ES itself
project.configurations.compile.dependencies.all { dep ->
if (!(dep instanceof ProjectDependency) && dep.getGroup() != 'org.elasticsearch') {
dep.transitive = false
}
}
project.configurations.testCompile.dependencies.all { dep ->
if (!(dep instanceof ProjectDependency) && dep.getGroup() != 'org.elasticsearch') {
dep.transitive = false
}
}
}
/** Adds repositories used by ES dependencies */
static void configureRepositories(Project project) {
RepositoryHandler repos = project.repositories
repos.mavenCentral()
repos.maven {
name 'sonatype-snapshots'
url 'http://oss.sonatype.org/content/repositories/snapshots/'
}
String luceneVersion = VersionProperties.lucene
if (luceneVersion.contains('-snapshot')) {
// extract the revision number from the version with a regex matcher
String revision = (luceneVersion =~ /\w+-snapshot-(\d+)/)[0][1]
repos.maven {
name 'lucene-snapshots'
url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}"
}
}
}
/** Adds compiler settings to the project */
static void configureCompile(Project project) {
project.afterEvaluate {
@ -91,8 +132,8 @@ class BuildPlugin implements Plugin<Project> {
project.afterEvaluate {
project.tasks.withType(Jar) { Jar jarTask ->
manifest {
attributes('X-Compile-Elasticsearch-Version': ElasticsearchProperties.version,
'X-Compile-Lucene-Version': ElasticsearchProperties.luceneVersion)
attributes('X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch,
'X-Compile-Lucene-Version': VersionProperties.lucene)
}
}
}

View File

@ -19,19 +19,23 @@
package org.elasticsearch.gradle
/**
* Accessor for properties about the version of elasticsearch this was built with.
* Accessor for shared dependency versions used by elasticsearch, namely the elasticsearch and lucene versions.
*/
class ElasticsearchProperties {
static final String version
static final String luceneVersion
class VersionProperties {
static final String elasticsearch
static final String lucene
static final Map<String, String> versions = new HashMap<>()
static {
Properties props = new Properties()
InputStream propsStream = ElasticsearchProperties.class.getResourceAsStream('/elasticsearch.properties')
InputStream propsStream = VersionProperties.class.getResourceAsStream('/version.properties')
if (propsStream == null) {
throw new RuntimeException('/elasticsearch.properties resource missing')
throw new RuntimeException('/version.properties resource missing')
}
props.load(propsStream)
version = props.getProperty('version')
luceneVersion = props.getProperty('luceneVersion')
elasticsearch = props.getProperty('elasticsearch')
lucene = props.getProperty('lucene')
for (String property : props.stringPropertyNames()) {
versions.put(property, props.getProperty(property))
}
}
}
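
For illustration, any code with the buildSrc classes on its classpath can now look up a pinned version from the shared map. A minimal sketch (the demo class and main wrapper are mine, not part of this commit):

    import java.util.Map;

    import org.elasticsearch.gradle.VersionProperties;

    public class VersionLookupDemo {
        public static void main(String[] args) {
            // VersionProperties' static initializer has already parsed
            // /version.properties from the buildSrc jar, so every pinned
            // dependency version is available from the shared map.
            Map<String, String> versions = VersionProperties.versions;
            System.out.println("httpclient = " + versions.get("httpclient")); // 4.3.6 per this commit
            System.out.println("lucene     = " + VersionProperties.lucene);
        }
    }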

View File

@ -20,7 +20,6 @@ package org.elasticsearch.gradle.plugin
import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.ElasticsearchProperties
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.gradle.api.Project
import org.gradle.api.Task
@ -49,7 +48,7 @@ class PluginBuildPlugin extends BuildPlugin {
project.integTest.configure {
dependsOn project.bundlePlugin
cluster {
plugin project.name, project.bundlePlugin.outputs.files
plugin project.pluginProperties.extension.name, project.bundlePlugin.outputs.files
}
}
}
@ -64,20 +63,18 @@ class PluginBuildPlugin extends BuildPlugin {
}
static void configureDependencies(Project project) {
String elasticsearchVersion = ElasticsearchProperties.version
project.dependencies {
provided "org.elasticsearch:elasticsearch:${elasticsearchVersion}"
testCompile "org.elasticsearch:test-framework:${elasticsearchVersion}"
provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
testCompile "org.elasticsearch:test-framework:${project.versions.elasticsearch}"
// we "upgrade" these optional deps to provided for plugins, since they will run
// with a full elasticsearch server that includes optional deps
// TODO: remove duplication of version here with core...
provided 'com.spatial4j:spatial4j:0.4.1'
provided 'com.vividsolutions:jts:1.13'
provided 'com.github.spullara.mustache.java:compiler:0.9.1'
provided "log4j:log4j:1.2.17"
provided "log4j:apache-log4j-extras:1.2.17"
provided "org.slf4j:slf4j-api:1.6.2"
provided 'net.java.dev.jna:jna:4.1.0'
provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"
provided "com.vividsolutions:jts:${project.versions.jts}"
provided "com.github.spullara.mustache.java:compiler:${project.versions.mustache}"
provided "log4j:log4j:${project.versions.log4j}"
provided "log4j:apache-log4j-extras:${project.versions.log4j}"
provided "org.slf4j:slf4j-api:${project.versions.slf4j}"
provided "net.java.dev.jna:jna:${project.versions.jna}"
}
}

View File

@ -18,7 +18,7 @@
*/
package org.elasticsearch.gradle.plugin
import org.elasticsearch.gradle.ElasticsearchProperties
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.InvalidUserDataException
import org.gradle.api.Task
import org.gradle.api.tasks.Copy
@ -72,7 +72,7 @@ class PluginPropertiesTask extends Copy {
'name': extension.name,
'description': extension.description,
'version': extension.version,
'elasticsearchVersion': ElasticsearchProperties.version,
'elasticsearchVersion': VersionProperties.elasticsearch,
'javaVersion': project.targetCompatibility as String,
'jvm': extension.jvm as String,
'site': extension.site as String,

View File

@ -36,6 +36,9 @@ class ClusterConfiguration {
@Input
int transportPort = 9500
@Input
boolean daemonize = true
@Input
String jvmArgs = System.getProperty('tests.jvm.argline', '')

View File

@ -18,8 +18,12 @@
*/
package org.elasticsearch.gradle.test
import org.gradle.internal.jvm.Jvm
import java.nio.file.Paths
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.ElasticsearchProperties
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.InvalidUserDataException
@ -52,7 +56,7 @@ class ClusterFormationTasks {
/** Adds a dependency on the given distribution */
static void configureDistributionDependency(Project project, String distro) {
String elasticsearchVersion = ElasticsearchProperties.version
String elasticsearchVersion = VersionProperties.elasticsearch
String packaging = distro == 'tar' ? 'tar.gz' : distro
project.configurations {
elasticsearchDistro
@ -101,7 +105,7 @@ class ClusterFormationTasks {
for (Map.Entry<String, FileCollection> plugin : config.plugins.entrySet()) {
// replace every dash followed by a character with just the uppercase character
String camelName = plugin.getKey().replaceAll(/-(\w)/) { _, c -> c.toUpperCase(Locale.ROOT) }
String taskName = "${task.name}#install${camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1)}"
String taskName = "${task.name}#install${camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1)}Plugin"
// delay reading the file location until execution time by wrapping in a closure within a GString
String file = "${ -> new File(pluginsTmpDir, plugin.getValue().singleFile.getName()).toURI().toURL().toString() }"
Object[] args = [new File(home, 'bin/plugin'), 'install', file]
@ -173,12 +177,10 @@ class ClusterFormationTasks {
if (config.plugins.isEmpty()) {
return setup
}
// collect the files for plugins into a list, but wrap each in a closure to delay
// looking for the filename until execution time
List files = config.plugins.values().collect { plugin -> return { plugin.singleFile } }
return project.tasks.create(name: name, type: Copy, dependsOn: setup) {
into pluginsTmpDir
from(*files) // spread the list into varargs
from(config.plugins.values())
}
}
@ -244,17 +246,22 @@ class ClusterFormationTasks {
// elasticsearch.bat is spawned as it has no daemon mode
return project.tasks.create(name: name, type: DefaultTask, dependsOn: setup) << {
// Fall back to Ant exec task as Gradle Exec task does not support spawning yet
ant.exec(executable: 'cmd', spawn: true, dir: cwd) {
ant.exec(executable: 'cmd', spawn: config.daemonize, dir: cwd) {
esEnv.each { key, value -> env(key: key, value: value) }
(['/C', 'call', esScript] + esProps).each { arg(value: it) }
}
esPostStartActions(ant, logger)
}
} else {
List esExecutable = [esScript]
if(config.daemonize) {
esExecutable.add("-d")
}
return project.tasks.create(name: name, type: Exec, dependsOn: setup) {
workingDir cwd
executable 'sh'
args esScript, '-d' // daemonize!
args esExecutable
args esProps
environment esEnv
errorOutput = new ByteArrayOutputStream()
@ -279,7 +286,16 @@ class ClusterFormationTasks {
onlyIf { pidFile.exists() }
// the pid file won't actually be read until execution time, since the read is wrapped within an inner closure of the GString
ext.pid = "${ -> pidFile.getText('UTF-8').trim()}"
commandLine new File(System.getenv('JAVA_HOME'), 'bin/jps'), '-l'
File jps
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
jps = getJpsExecutableByName("jps.exe")
} else {
jps = getJpsExecutableByName("jps")
}
if (!jps.exists()) {
throw new GradleException("jps executable not found; ensure that you're running Gradle with the JDK rather than the JRE")
}
commandLine jps, '-l'
standardOutput = new ByteArrayOutputStream()
doLast {
String out = standardOutput.toString()
@ -297,6 +313,10 @@ class ClusterFormationTasks {
}
}
private static File getJpsExecutableByName(String jpsExecutableName) {
return Paths.get(Jvm.current().javaHome.toString(), "bin/" + jpsExecutableName).toFile()
}
/** Adds a task to kill an elasticsearch node with the given pidfile */
static Task configureStopTask(String name, Project project, Object depends, File pidFile) {
return project.tasks.create(name: name, type: Exec, dependsOn: depends) {
@ -325,7 +345,7 @@ class ClusterFormationTasks {
switch (distro) {
case 'zip':
case 'tar':
path = "elasticsearch-${ElasticsearchProperties.version}"
path = "elasticsearch-${VersionProperties.elasticsearch}"
break;
default:
throw new InvalidUserDataException("Unknown distribution: ${distro}")

View File

@ -18,7 +18,7 @@
*/
package org.elasticsearch.gradle.test
import org.elasticsearch.gradle.ElasticsearchProperties
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.tasks.Copy
@ -38,7 +38,7 @@ class RestSpecHack {
restSpec
}
project.dependencies {
restSpec "org.elasticsearch:rest-api-spec:${ElasticsearchProperties.version}"
restSpec "org.elasticsearch:rest-api-spec:${VersionProperties.elasticsearch}"
}
}

View File

@ -0,0 +1,12 @@
package org.elasticsearch.gradle.test
import org.gradle.api.DefaultTask
import org.gradle.api.tasks.TaskAction
class RunTask extends DefaultTask {
ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)
RunTask() {
ClusterFormationTasks.setup(project, this, clusterConfig)
}
}

View File

@ -21,7 +21,8 @@
package org.elasticsearch.gradle.test
import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin
import org.elasticsearch.gradle.ElasticsearchProperties
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin
@ -31,6 +32,8 @@ class StandaloneTestBasePlugin implements Plugin<Project> {
@Override
void apply(Project project) {
BuildPlugin.configureRepositories(project)
project.pluginManager.apply(JavaBasePlugin)
project.pluginManager.apply(RandomizedTestingPlugin)
@ -42,7 +45,7 @@ class StandaloneTestBasePlugin implements Plugin<Project> {
test
}
project.dependencies {
testCompile "org.elasticsearch:test-framework:${ElasticsearchProperties.version}"
testCompile "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}"
}
project.eclipse {

View File

@ -21,7 +21,6 @@ package org.elasticsearch.gradle.test
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.ElasticsearchProperties
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.plugins.JavaBasePlugin

View File

@ -87,3 +87,6 @@ java.util.concurrent.Future#cancel(boolean)
@defaultMessage Don't try reading from paths that are not configured in Environment, resolve from Environment instead
org.elasticsearch.common.io.PathUtils#get(java.lang.String, java.lang.String[])
org.elasticsearch.common.io.PathUtils#get(java.net.URI)
@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
org.apache.lucene.search.Query#setBoost(float)
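
The replacement pattern this commit applies throughout (see the BlendedTermQuery and MapperQueryParser changes below) is to wrap the query instead of mutating it. A minimal sketch against the Lucene 5.x API (the demo class is mine):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BoostQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    public class BoostMigrationDemo {
        public static void main(String[] args) {
            Query base = new TermQuery(new Term("field", "value"));
            // Forbidden: base.setBoost(2f) mutates the query and is deprecated.
            // Instead, wrap it in a BoostQuery; skip the wrapper for the
            // no-op boost of 1f, exactly as the diff does.
            float boost = 2f;
            Query boosted = boost != 1f ? new BoostQuery(base, boost) : base;
            System.out.println(boosted); // prints something like (field:value)^2.0
        }
    }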

View File

@ -0,0 +1,20 @@
elasticsearch = 3.0.0-SNAPSHOT
lucene = 5.4.0-snapshot-1712973
# optional dependencies
spatial4j = 0.5
jts = 1.13
mustache = 0.9.1
jackson = 2.6.2
log4j = 1.2.17
slf4j = 1.6.2
jna = 4.1.0
# test dependencies
randomizedrunner = 2.2.0
junit = 4.11
httpclient = 4.3.6
httpcore = 4.3.3
commonslogging = 1.1.3
commonscodec = 1.10

View File

@ -27,25 +27,23 @@ apply plugin: 'nebula.optional-base'
archivesBaseName = 'elasticsearch'
versions << [
jackson: '2.6.2',
log4j: '1.2.17',
slf4j: '1.6.2'
]
dependencies {
// lucene
compile "org.apache.lucene:lucene-core:${versions.lucene}"
compile "org.apache.lucene:lucene-backward-codecs:${versions.lucene}"
compile "org.apache.lucene:lucene-analyzers-common:${versions.lucene}"
compile "org.apache.lucene:lucene-queries:${versions.lucene}"
compile "org.apache.lucene:lucene-memory:${versions.lucene}"
compile "org.apache.lucene:lucene-backward-codecs:${versions.lucene}"
compile "org.apache.lucene:lucene-grouping:${versions.lucene}"
compile "org.apache.lucene:lucene-highlighter:${versions.lucene}"
compile "org.apache.lucene:lucene-queryparser:${versions.lucene}"
compile "org.apache.lucene:lucene-suggest:${versions.lucene}"
compile "org.apache.lucene:lucene-join:${versions.lucene}"
compile "org.apache.lucene:lucene-memory:${versions.lucene}"
compile "org.apache.lucene:lucene-misc:${versions.lucene}"
compile "org.apache.lucene:lucene-queries:${versions.lucene}"
compile "org.apache.lucene:lucene-queryparser:${versions.lucene}"
compile "org.apache.lucene:lucene-sandbox:${versions.lucene}"
compile "org.apache.lucene:lucene-spatial:${versions.lucene}"
compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
compile "org.apache.lucene:lucene-suggest:${versions.lucene}"
compile 'org.elasticsearch:securesm:1.0'
@ -62,10 +60,10 @@ dependencies {
// json and yaml
compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}"
compile(group: 'com.fasterxml.jackson.dataformat', name: 'jackson-dataformat-yaml', version: versions.jackson) {
exclude group: 'com.fasterxml.jackson.core', module: 'jackson-databind'
}
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
compile "org.yaml:snakeyaml:1.15" // used by jackson yaml
// network stack
compile 'io.netty:netty:3.10.5.Final'
// compression of transport protocol
@ -76,18 +74,18 @@ dependencies {
compile 'org.hdrhistogram:HdrHistogram:2.1.6'
// lucene spatial
compile 'com.spatial4j:spatial4j:0.5', optional
compile 'com.vividsolutions:jts:1.13', optional
compile "com.spatial4j:spatial4j:${versions.spatial4j}", optional
compile "com.vividsolutions:jts:${versions.jts}", optional
// templating
compile 'com.github.spullara.mustache.java:compiler:0.9.1', optional
compile "com.github.spullara.mustache.java:compiler:${versions.mustache}", optional
// logging
compile "log4j:log4j:${versions.log4j}", optional
compile "log4j:apache-log4j-extras:${versions.log4j}", optional
compile "org.slf4j:slf4j-api:${versions.slf4j}", optional
compile 'net.java.dev.jna:jna:4.1.0', optional
compile "net.java.dev.jna:jna:${versions.jna}", optional
if (isEclipse == false || project.path == "${projectsPrefix}:core-tests") {
testCompile("org.elasticsearch:test-framework:${version}") {
@ -136,4 +134,3 @@ if (isEclipse == false || project.path == "${projectsPrefix}:core-tests") {
Task copyRestSpec = RestSpecHack.configureTask(project, true)
integTest.dependsOn copyRestSpec
}

View File

@ -18,18 +18,9 @@
*/
package org.apache.lucene.queries;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.InPlaceMergeSorter;
import org.apache.lucene.util.ToStringUtils;
@ -37,6 +28,7 @@ import org.apache.lucene.util.ToStringUtils;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
/**
* BlendedTermQuery can be used to unify term statistics across
@ -77,6 +69,10 @@ public abstract class BlendedTermQuery extends Query {
@Override
public Query rewrite(IndexReader reader) throws IOException {
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
IndexReaderContext context = reader.getContext();
TermContext[] ctx = new TermContext[terms.length];
int[] docFreqs = new int[ctx.length];
@ -87,9 +83,7 @@ public abstract class BlendedTermQuery extends Query {
final int maxDoc = reader.maxDoc();
blend(ctx, maxDoc, reader);
Query query = topLevelQuery(terms, ctx, docFreqs, maxDoc);
query.setBoost(getBoost());
return query;
return topLevelQuery(terms, ctx, docFreqs, maxDoc);
}
protected abstract Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc);
@ -274,20 +268,15 @@ public abstract class BlendedTermQuery extends Query {
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (!super.equals(o)) return false;
BlendedTermQuery that = (BlendedTermQuery) o;
if (!Arrays.equals(equalsTerms(), that.equalsTerms())) return false;
return true;
return Arrays.equals(equalsTerms(), that.equalsTerms());
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + Arrays.hashCode(equalsTerms());
return result;
return Objects.hash(super.hashCode(), Arrays.hashCode(equalsTerms()));
}
public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final boolean disableCoord) {
@ -298,16 +287,16 @@ public abstract class BlendedTermQuery extends Query {
return new BlendedTermQuery(terms, boosts) {
@Override
protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) {
BooleanQuery.Builder query = new BooleanQuery.Builder();
query.setDisableCoord(disableCoord);
BooleanQuery.Builder booleanQueryBuilder = new BooleanQuery.Builder();
booleanQueryBuilder.setDisableCoord(disableCoord);
for (int i = 0; i < terms.length; i++) {
TermQuery termQuery = new TermQuery(terms[i], ctx[i]);
if (boosts != null) {
termQuery.setBoost(boosts[i]);
Query query = new TermQuery(terms[i], ctx[i]);
if (boosts != null && boosts[i] != 1f) {
query = new BoostQuery(query, boosts[i]);
}
query.add(termQuery, BooleanClause.Occur.SHOULD);
booleanQueryBuilder.add(query, BooleanClause.Occur.SHOULD);
}
return query.build();
return booleanQueryBuilder.build();
}
};
}
@ -321,16 +310,16 @@ public abstract class BlendedTermQuery extends Query {
BooleanQuery.Builder lowBuilder = new BooleanQuery.Builder();
lowBuilder.setDisableCoord(disableCoord);
for (int i = 0; i < terms.length; i++) {
TermQuery termQuery = new TermQuery(terms[i], ctx[i]);
if (boosts != null) {
termQuery.setBoost(boosts[i]);
Query query = new TermQuery(terms[i], ctx[i]);
if (boosts != null && boosts[i] != 1f) {
query = new BoostQuery(query, boosts[i]);
}
if ((maxTermFrequency >= 1f && docFreqs[i] > maxTermFrequency)
|| (docFreqs[i] > (int) Math.ceil(maxTermFrequency
* (float) maxDoc))) {
highBuilder.add(termQuery, BooleanClause.Occur.SHOULD);
highBuilder.add(query, BooleanClause.Occur.SHOULD);
} else {
lowBuilder.add(termQuery, BooleanClause.Occur.SHOULD);
lowBuilder.add(query, BooleanClause.Occur.SHOULD);
}
}
BooleanQuery high = highBuilder.build();
@ -363,15 +352,15 @@ public abstract class BlendedTermQuery extends Query {
return new BlendedTermQuery(terms, boosts) {
@Override
protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) {
DisjunctionMaxQuery query = new DisjunctionMaxQuery(tieBreakerMultiplier);
DisjunctionMaxQuery disMaxQuery = new DisjunctionMaxQuery(tieBreakerMultiplier);
for (int i = 0; i < terms.length; i++) {
TermQuery termQuery = new TermQuery(terms[i], ctx[i]);
if (boosts != null) {
termQuery.setBoost(boosts[i]);
Query query = new TermQuery(terms[i], ctx[i]);
if (boosts != null && boosts[i] != 1f) {
query = new BoostQuery(query, boosts[i]);
}
query.add(termQuery);
disMaxQuery.add(query);
}
return query;
return disMaxQuery;
}
};
}

View File

@ -29,6 +29,7 @@ import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import java.io.IOException;
import java.util.Objects;
/** A {@link Query} that only matches documents that are greater than or equal
* to a configured doc ID. */
@ -43,7 +44,7 @@ public final class MinDocQuery extends Query {
@Override
public int hashCode() {
return 31 * super.hashCode() + minDoc;
return Objects.hash(super.hashCode(), minDoc);
}
@Override

View File

@ -23,13 +23,7 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.*;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.automaton.RegExp;
import org.elasticsearch.common.lucene.search.Queries;
@ -41,12 +35,7 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.QueryParsers;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.*;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
@ -148,8 +137,7 @@ public class MapperQueryParser extends QueryParser {
Query q = getFieldQuerySingle(mField, queryText, quoted);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
disMaxQuery.add(applyBoost(mField, q));
}
}
if (!added) {
@ -161,8 +149,7 @@ public class MapperQueryParser extends QueryParser {
for (String mField : fields) {
Query q = getFieldQuerySingle(mField, queryText, quoted);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
@ -250,9 +237,8 @@ public class MapperQueryParser extends QueryParser {
Query q = super.getFieldQuery(mField, queryText, slop);
if (q != null) {
added = true;
applyBoost(mField, q);
q = applySlop(q, slop);
disMaxQuery.add(q);
disMaxQuery.add(applyBoost(mField, q));
}
}
if (!added) {
@ -264,9 +250,8 @@ public class MapperQueryParser extends QueryParser {
for (String mField : fields) {
Query q = super.getFieldQuery(mField, queryText, slop);
if (q != null) {
applyBoost(mField, q);
q = applySlop(q, slop);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
@ -305,8 +290,7 @@ public class MapperQueryParser extends QueryParser {
Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
disMaxQuery.add(applyBoost(mField, q));
}
}
if (!added) {
@ -318,8 +302,7 @@ public class MapperQueryParser extends QueryParser {
for (String mField : fields) {
Query q = getRangeQuerySingle(mField, part1, part2, startInclusive, endInclusive);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
@ -371,8 +354,7 @@ public class MapperQueryParser extends QueryParser {
Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
disMaxQuery.add(applyBoost(mField, q));
}
}
if (!added) {
@ -383,8 +365,9 @@ public class MapperQueryParser extends QueryParser {
List<BooleanClause> clauses = new ArrayList<>();
for (String mField : fields) {
Query q = getFuzzyQuerySingle(mField, termStr, minSimilarity);
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
if (q != null) {
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
}
}
return getBooleanQuery(clauses, true);
}
@ -434,8 +417,7 @@ public class MapperQueryParser extends QueryParser {
Query q = getPrefixQuerySingle(mField, termStr);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
disMaxQuery.add(applyBoost(mField, q));
}
}
if (!added) {
@ -447,8 +429,7 @@ public class MapperQueryParser extends QueryParser {
for (String mField : fields) {
Query q = getPrefixQuerySingle(mField, termStr);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
@ -566,8 +547,7 @@ public class MapperQueryParser extends QueryParser {
Query q = getWildcardQuerySingle(mField, termStr);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
disMaxQuery.add(applyBoost(mField, q));
}
}
if (!added) {
@ -579,8 +559,7 @@ public class MapperQueryParser extends QueryParser {
for (String mField : fields) {
Query q = getWildcardQuerySingle(mField, termStr);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
@ -697,8 +676,7 @@ public class MapperQueryParser extends QueryParser {
Query q = getRegexpQuerySingle(mField, termStr);
if (q != null) {
added = true;
applyBoost(mField, q);
disMaxQuery.add(q);
disMaxQuery.add(applyBoost(mField, q));
}
}
if (!added) {
@ -710,8 +688,7 @@ public class MapperQueryParser extends QueryParser {
for (String mField : fields) {
Query q = getRegexpQuerySingle(mField, termStr);
if (q != null) {
applyBoost(mField, q);
clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
}
}
if (clauses.size() == 0) // happens for stopwords
@ -761,11 +738,12 @@ public class MapperQueryParser extends QueryParser {
return fixNegativeQueryIfNeeded(q);
}
private void applyBoost(String field, Query q) {
private Query applyBoost(String field, Query q) {
Float fieldBoost = settings.fieldsAndWeights().get(field);
if (fieldBoost != null) {
q.setBoost(fieldBoost);
if (fieldBoost != null && fieldBoost != 1f) {
return new BoostQuery(q, fieldBoost);
}
return q;
}
private Query applySlop(Query q, int slop) {
@ -779,7 +757,9 @@ public class MapperQueryParser extends QueryParser {
builder.add(terms[i], positions[i]);
}
pq = builder.build();
pq.setBoost(q.getBoost());
//make sure that the boost hasn't been set beforehand, otherwise we'd lose it
assert q.getBoost() == 1f;
assert q instanceof BoostQuery == false;
return pq;
} else if (q instanceof MultiPhraseQuery) {
((MultiPhraseQuery) q).setSlop(slop);

View File

@ -22,11 +22,7 @@ package org.apache.lucene.search.vectorhighlight;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.*;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
@ -103,8 +99,7 @@ public class CustomFieldQuery extends FieldQuery {
for (int i = 0; i < termsIdx.length; i++) {
queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
}
PhraseQuery query = queryBuilder.build();
query.setBoost(orig.getBoost());
Query query = queryBuilder.build();
this.flatten(query, reader, flatQueries, orig.getBoost());
} else {
Term[] t = terms.get(currentPos);

View File

@ -77,7 +77,7 @@ public class TransportRecoveryAction extends TransportBroadcastByNodeAction<Reco
}
String indexName = recoveryState.getShardId().getIndex();
if (!shardResponses.containsKey(indexName)) {
shardResponses.put(indexName, new ArrayList<RecoveryState>());
shardResponses.put(indexName, new ArrayList<>());
}
if (request.activeOnly()) {
if (recoveryState.getStage() != RecoveryState.Stage.DONE) {

View File

@ -64,6 +64,17 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
private long sizeInBytes = 0;
public BulkRequest() {
}
/**
* Creates a bulk request caused by some other request, which is provided as an
* argument so that its headers and context can be copied to the new request
*/
public BulkRequest(ActionRequest request) {
super(request);
}
/**
* Adds a list of requests to be executed. Either index or delete requests.
*/
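
A hypothetical caller of the new copy constructor (the helper and its names are illustrative, not from the commit): a bulk request derived from an incoming request inherits that request's headers and context instead of starting empty.

    import org.elasticsearch.action.ActionRequest;
    import org.elasticsearch.action.bulk.BulkRequest;
    import org.elasticsearch.action.index.IndexRequest;

    public class BulkFromRequestDemo {
        // Illustrative helper: the internally generated bulk request carries
        // over the headers and context of the request that triggered it.
        static BulkRequest bulkFor(ActionRequest trigger) {
            BulkRequest bulk = new BulkRequest(trigger);
            bulk.add(new IndexRequest("index", "type", "1").source("field", "value"));
            return bulk;
        }
    }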

View File

@ -37,6 +37,17 @@ public class ClearScrollRequest extends ActionRequest<ClearScrollRequest> {
private List<String> scrollIds;
public ClearScrollRequest() {
}
/**
* Creates a clear scroll request caused by some other request, which is provided as an
* argument so that its headers and context can be copied to the new request
*/
public ClearScrollRequest(ActionRequest request) {
super(request);
}
public List<String> getScrollIds() {
return scrollIds;
}

View File

@ -46,6 +46,14 @@ public class SearchScrollRequest extends ActionRequest<SearchScrollRequest> {
this.scrollId = scrollId;
}
/**
* Creates a scroll request caused by some other request, which is provided as an
* argument so that its headers and context can be copied to the new request
*/
public SearchScrollRequest(ActionRequest request) {
super(request);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;

View File

@ -20,6 +20,7 @@
package org.elasticsearch.action.support.master;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.support.ActionFilters;
@ -29,15 +30,16 @@ import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportService;
@ -48,6 +50,19 @@ import java.util.function.Supplier;
* A base class for operations that needs to be performed on the master node.
*/
public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
private static final ClusterStateObserver.ChangePredicate masterNodeChangedPredicate = new ClusterStateObserver.ChangePredicate() {
@Override
public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus,
ClusterState newState, ClusterState.ClusterStateStatus newStatus) {
// The condition !newState.nodes().masterNodeId().equals(previousState.nodes().masterNodeId()) is not sufficient as the same master node might get reelected after a disruption.
return newState.nodes().masterNodeId() != null && newState != previousState;
}
@Override
public boolean apply(ClusterChangedEvent event) {
return event.nodesDelta().masterNodeChanged();
}
};
protected final TransportService transportService;
protected final ClusterService clusterService;
@ -75,20 +90,40 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
protected abstract ClusterBlockException checkBlock(Request request, ClusterState state);
protected void processBeforeDelegationToMaster(Request request, ClusterState state) {
}
@Override
protected void doExecute(final Request request, ActionListener<Response> listener) {
new AsyncSingleAction(request, listener).start();
}
class AsyncSingleAction {
private final ActionListener<Response> listener;
private final Request request;
private volatile ClusterStateObserver observer;
private final ClusterStateObserver.ChangePredicate retryableOrNoBlockPredicate = new ClusterStateObserver.ValidationPredicate() {
@Override
protected boolean validate(ClusterState newState) {
ClusterBlockException blockException = checkBlock(request, newState);
return (blockException == null || !blockException.retryable());
}
};
AsyncSingleAction(Request request, ActionListener<Response> listener) {
this.request = request;
// TODO do we really need to wrap it in a listener? the handlers should be cheap
if ((listener instanceof ThreadedActionListener) == false) {
listener = new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.LISTENER, listener);
}
innerExecute(request, listener, new ClusterStateObserver(clusterService, request.masterNodeTimeout(), logger), false);
this.listener = listener;
}
private void innerExecute(final Request request, final ActionListener<Response> listener, final ClusterStateObserver observer, final boolean retrying) {
public void start() {
this.observer = new ClusterStateObserver(clusterService, request.masterNodeTimeout(), logger);
doStart();
}
protected void doStart() {
final ClusterState clusterState = observer.observedState();
final DiscoveryNodes nodes = clusterState.nodes();
if (nodes.localNodeMaster() || localExecute(request)) {
@ -97,125 +132,54 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
if (blockException != null) {
if (!blockException.retryable()) {
listener.onFailure(blockException);
return;
}
logger.trace("can't execute due to a cluster block, retrying", blockException);
observer.waitForNextChange(
new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
innerExecute(request, listener, observer, false);
}
@Override
public void onClusterServiceClose() {
listener.onFailure(blockException);
}
@Override
public void onTimeout(TimeValue timeout) {
listener.onFailure(blockException);
}
}, new ClusterStateObserver.ValidationPredicate() {
@Override
protected boolean validate(ClusterState newState) {
ClusterBlockException blockException = checkBlock(request, newState);
return (blockException == null || !blockException.retryable());
}
}
);
} else {
threadPool.executor(executor).execute(new ActionRunnable(listener) {
logger.trace("can't execute due to a cluster block, retrying", blockException);
retry(blockException, retryableOrNoBlockPredicate);
}
} else {
ActionListener<Response> delegate = new ActionListener<Response>() {
@Override
public void onResponse(Response response) {
listener.onResponse(response);
}
@Override
public void onFailure(Throwable t) {
if (t instanceof Discovery.FailedToCommitClusterStateException
|| (t instanceof NotMasterException)) {
logger.debug("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", t, actionName);
retry(t, masterNodeChangedPredicate);
} else {
listener.onFailure(t);
}
}
};
threadPool.executor(executor).execute(new ActionRunnable(delegate) {
@Override
protected void doRun() throws Exception {
masterOperation(request, clusterService.state(), listener);
masterOperation(request, clusterService.state(), delegate);
}
});
}
} else {
if (nodes.masterNode() == null) {
if (retrying) {
listener.onFailure(new MasterNotDiscoveredException());
} else {
logger.debug("no known master node, scheduling a retry");
observer.waitForNextChange(
new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
innerExecute(request, listener, observer, true);
}
@Override
public void onClusterServiceClose() {
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
listener.onFailure(new MasterNotDiscoveredException("waited for [" + timeout + "]"));
}
}, new ClusterStateObserver.ChangePredicate() {
@Override
public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus,
ClusterState newState, ClusterState.ClusterStateStatus newStatus) {
return newState.nodes().masterNodeId() != null;
}
@Override
public boolean apply(ClusterChangedEvent event) {
return event.nodesDelta().masterNodeChanged();
}
}
);
}
return;
}
processBeforeDelegationToMaster(request, clusterState);
transportService.sendRequest(nodes.masterNode(), actionName, request, new BaseTransportResponseHandler<Response>() {
retry(new MasterNotDiscoveredException(), masterNodeChangedPredicate);
} else {
transportService.sendRequest(nodes.masterNode(), actionName, request, new ActionListenerResponseHandler<Response>(listener) {
@Override
public Response newInstance() {
return newResponse();
}
@Override
public void handleResponse(Response response) {
listener.onResponse(response);
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
@Override
public void handleException(final TransportException exp) {
if (exp.unwrapCause() instanceof ConnectTransportException) {
Throwable cause = exp.unwrapCause();
if (cause instanceof ConnectTransportException) {
// we want to retry here a bit to see if a new master is elected
logger.debug("connection exception while trying to forward request to master node [{}], scheduling a retry. Error: [{}]",
nodes.masterNode(), exp.getDetailedMessage());
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
innerExecute(request, listener, observer, false);
}
@Override
public void onClusterServiceClose() {
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
listener.onFailure(new MasterNotDiscoveredException());
}
}, new ClusterStateObserver.EventPredicate() {
@Override
public boolean apply(ClusterChangedEvent event) {
return event.nodesDelta().masterNodeChanged();
}
}
);
logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]",
actionName, nodes.masterNode(), exp.getDetailedMessage());
retry(cause, masterNodeChangedPredicate);
} else {
listener.onFailure(exp);
}
@ -224,3 +188,27 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
}
}
}
private void retry(final Throwable failure, final ClusterStateObserver.ChangePredicate changePredicate) {
observer.waitForNextChange(
new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
doStart();
}
@Override
public void onClusterServiceClose() {
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
logger.debug("timed out while retrying [{}] after failure (timeout [{}])", failure, actionName, timeout);
listener.onFailure(failure);
}
}, changePredicate
);
}
}
}

View File

@ -74,18 +74,6 @@ final class ESPolicy extends Policy {
}
}
// Special handling for broken AWS code which destroys all SSL security
// REMOVE THIS when https://github.com/aws/aws-sdk-java/pull/432 is fixed
if (permission instanceof RuntimePermission && "accessClassInPackage.sun.security.ssl".equals(permission.getName())) {
for (StackTraceElement element : Thread.currentThread().getStackTrace()) {
if ("com.amazonaws.http.conn.ssl.SdkTLSSocketFactory".equals(element.getClassName()) &&
"verifyMasterSecret".equals(element.getMethodName())) {
// we found the horrible method: the hack begins!
// force the aws code to back down, by throwing an exception that it catches.
rethrow(new IllegalAccessException("no amazon, you cannot do this."));
}
}
}
// otherwise defer to template + dynamic file permissions
return template.implies(domain, permission) || dynamic.implies(permission);
}
@ -104,20 +92,4 @@ final class ESPolicy extends Policy {
// return UNSUPPORTED_EMPTY_COLLECTION since it is safe.
return super.getPermissions(codesource);
}
/**
* Classy puzzler to rethrow any checked exception as an unchecked one.
*/
private static class Rethrower<T extends Throwable> {
private void rethrow(Throwable t) throws T {
throw (T) t;
}
}
/**
* Rethrows <code>t</code> (identical object).
*/
private void rethrow(Throwable t) {
new Rethrower<Error>().rethrow(t);
}
}

View File

@ -205,6 +205,7 @@ public class ClusterModule extends AbstractModule {
registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE, Validator.EMPTY);
registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE + ".*", Validator.EMPTY);
registerClusterDynamicSetting(TransportCloseIndexAction.SETTING_CLUSTER_INDICES_CLOSE_ENABLE, Validator.BOOLEAN);
registerClusterDynamicSetting(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, Validator.INTEGER);
}
private void registerBuiltinIndexSettings() {

View File

@ -51,7 +51,7 @@ abstract public class ClusterStateUpdateTask {
* called when the task was rejected because the local node is no longer master
*/
public void onNoLongerMaster(String source) {
onFailure(source, new EsRejectedExecutionException("no longer master. source: [" + source + "]"));
onFailure(source, new NotMasterException("no longer master. source: [" + source + "]"));
}
/**

View File

@ -16,13 +16,14 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.discovery.zen;
package org.elasticsearch.cluster;
/**
* Thrown when a node join request or a master ping reaches a node which is not
* currently acting as a master.
* currently acting as a master or when a cluster state update task is to be executed
* on a node that is no longer master.
*/
public class NotMasterException extends IllegalStateException {

View File

@ -118,6 +118,9 @@ public class RoutingAllocation {
private boolean debugDecision = false;
private boolean hasPendingAsyncFetch = false;
/**
* Creates a new {@link RoutingAllocation}
*
@ -246,4 +249,20 @@ public class RoutingAllocation {
return decision;
}
}
/**
* Returns <code>true</code> iff the current allocation run has not processed all of the in-flight or available
* shard or store fetches. Otherwise <code>false</code>
*/
public boolean hasPendingAsyncFetch() {
return hasPendingAsyncFetch;
}
/**
* Sets a flag that signals that current allocation run has not processed all of the in-flight or available shard or store fetches.
* This state is anti-viral and can only be reset by a new allocation run.
*/
public void setHasPendingAsyncFetch() {
this.hasPendingAsyncFetch = true;
}
}

View File

@ -118,7 +118,8 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
@Override
public boolean allocateUnassigned(RoutingAllocation allocation) {
return rebalance(allocation);
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
return balancer.allocateUnassigned();
}
@Override
@ -313,6 +314,15 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
return delta <= (threshold + 0.001f);
}
/**
* Allocates all possible unassigned shards
* @return <code>true</code> if the current configuration has been
* changed, otherwise <code>false</code>
*/
final boolean allocateUnassigned() {
return balance(true);
}
/**
* Balances the nodes on the cluster model according to the weight
* function. The configured threshold is the minimum delta between the
@ -328,16 +338,24 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
* changed, otherwise <code>false</code>
*/
public boolean balance() {
return balance(false);
}
private boolean balance(boolean onlyAssign) {
if (this.nodes.isEmpty()) {
/* with no nodes this is pointless */
return false;
}
if (logger.isTraceEnabled()) {
if (onlyAssign) {
logger.trace("Start assigning unassigned shards");
} else {
logger.trace("Start balancing cluster");
}
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned().transactionBegin();
boolean changed = initialize(routingNodes, unassigned);
if (!changed && allocation.deciders().canRebalance(allocation).type() == Type.YES) {
if (onlyAssign == false && changed == false && allocation.deciders().canRebalance(allocation).type() == Type.YES) {
NodeSorter sorter = newNodeSorter();
if (nodes.size() > 1) { /* skip if we only have one node */
for (String index : buildWeightOrderedIndidces(Operation.BALANCE, sorter)) {

View File

@ -76,7 +76,19 @@ public class ShardsAllocators extends AbstractComponent implements ShardsAllocat
@Override
public boolean rebalance(RoutingAllocation allocation) {
if (allocation.hasPendingAsyncFetch() == false) {
/*
* see https://github.com/elastic/elasticsearch/issues/14387
* if we allow rebalance operations while we are still fetching shard store data
* we might end up with unnecessary rebalance operations which can be super confusing/frustrating
* since once the fetches come back we might just move all the shards back again.
* Therefore we only do a rebalance if we have fetched all information.
*/
return allocator.rebalance(allocation);
} else {
logger.debug("skipping rebalance due to in-flight shard/store fetches");
return false;
}
}
@Override

View File

@ -24,13 +24,16 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
/**
* This {@link AllocationDecider} limits the number of shards per node on a per
* index basis. The allocator prevents a single node to hold more than
* {@value #INDEX_TOTAL_SHARDS_PER_NODE} per index during the allocation
* index or node-wide basis. The allocator prevents a single node from holding
* more than {@value #INDEX_TOTAL_SHARDS_PER_NODE} shards per index and
* {@value #CLUSTER_TOTAL_SHARDS_PER_NODE} shards globally during the allocation
* process. The limits of this decider can be changed in real-time via the
* index settings API.
* <p>
@ -50,66 +53,140 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
public static final String NAME = "shards_limit";
private volatile int clusterShardLimit;
/**
* Controls the maximum number of shards per index on a single Elasticsearch
* node. Negative values are interpreted as unlimited.
*/
public static final String INDEX_TOTAL_SHARDS_PER_NODE = "index.routing.allocation.total_shards_per_node";
/**
* Controls the maximum number of shards per node on a global level.
* Negative values are interpreted as unlimited.
*/
public static final String CLUSTER_TOTAL_SHARDS_PER_NODE = "cluster.routing.allocation.total_shards_per_node";
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
Integer newClusterLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, null);
if (newClusterLimit != null) {
logger.info("updating [{}] from [{}] to [{}]", CLUSTER_TOTAL_SHARDS_PER_NODE,
ShardsLimitAllocationDecider.this.clusterShardLimit, newClusterLimit);
ShardsLimitAllocationDecider.this.clusterShardLimit = newClusterLimit;
}
}
}
@Inject
public ShardsLimitAllocationDecider(Settings settings) {
public ShardsLimitAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
super(settings);
this.clusterShardLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, -1);
nodeSettingsService.addListener(new ApplySettings());
}
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
int totalShardsPerNode = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
if (totalShardsPerNode <= 0) {
return allocation.decision(Decision.YES, NAME, "total shard limit disabled: [%d] <= 0", totalShardsPerNode);
int indexShardLimit = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
// Capture the limit here in case it changes during this method's
// execution
final int clusterShardLimit = this.clusterShardLimit;
if (indexShardLimit <= 0 && clusterShardLimit <= 0) {
return allocation.decision(Decision.YES, NAME, "total shard limit disabled: [index: %d, cluster: %d] <= 0",
indexShardLimit, clusterShardLimit);
}
int nodeCount = 0;
int indexShardCount = 0;
int nodeShardCount = 0;
for (ShardRouting nodeShard : node) {
if (!nodeShard.index().equals(shardRouting.index())) {
continue;
}
// don't count relocating shards...
if (nodeShard.relocating()) {
continue;
}
nodeCount++;
nodeShardCount++;
if (nodeShard.index().equals(shardRouting.index())) {
indexShardCount++;
}
if (nodeCount >= totalShardsPerNode) {
return allocation.decision(Decision.NO, NAME, "too many shards for this index on node [%d], limit: [%d]",
nodeCount, totalShardsPerNode);
}
return allocation.decision(Decision.YES, NAME, "shard count under limit [%d] of total shards per node", totalShardsPerNode);
if (clusterShardLimit > 0 && nodeShardCount >= clusterShardLimit) {
return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], limit: [%d]",
nodeShardCount, clusterShardLimit);
}
if (indexShardLimit > 0 && indexShardCount >= indexShardLimit) {
return allocation.decision(Decision.NO, NAME, "too many shards for this index [%s] on node [%d], limit: [%d]",
shardRouting.index(), indexShardCount, indexShardLimit);
}
return allocation.decision(Decision.YES, NAME, "shard count under index limit [%d] and node limit [%d] of total shards per node",
indexShardLimit, clusterShardLimit);
}
@Override
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
int totalShardsPerNode = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
if (totalShardsPerNode <= 0) {
return allocation.decision(Decision.YES, NAME, "total shard limit disabled: [%d] <= 0", totalShardsPerNode);
int indexShardLimit = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
// Capture the limit here in case it changes during this method's
// execution
final int clusterShardLimit = this.clusterShardLimit;
if (indexShardLimit <= 0 && clusterShardLimit <= 0) {
return allocation.decision(Decision.YES, NAME, "total shard limit disabled: [index: %d, cluster: %d] <= 0",
indexShardLimit, clusterShardLimit);
}
int nodeCount = 0;
int indexShardCount = 0;
int nodeShardCount = 0;
for (ShardRouting nodeShard : node) {
if (!nodeShard.index().equals(shardRouting.index())) {
continue;
}
// don't count relocating shards...
if (nodeShard.relocating()) {
continue;
}
nodeCount++;
}
if (nodeCount > totalShardsPerNode) {
return allocation.decision(Decision.NO, NAME, "too many shards for this index on node [%d], limit: [%d]",
nodeCount, totalShardsPerNode);
}
return allocation.decision(Decision.YES, NAME, "shard count under limit [%d] of total shards per node", totalShardsPerNode);
nodeShardCount++;
if (nodeShard.index().equals(shardRouting.index())) {
indexShardCount++;
}
}
// Subtle difference between the `canAllocate` and `canRemain` is that
// this checks > while canAllocate checks >=
if (clusterShardLimit > 0 && nodeShardCount > clusterShardLimit) {
return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], limit: [%d]",
nodeShardCount, clusterShardLimit);
}
if (indexShardLimit > 0 && indexShardCount > indexShardLimit) {
return allocation.decision(Decision.NO, NAME, "too many shards for this index [%s] on node [%d], limit: [%d]",
shardRouting.index(), indexShardCount, indexShardLimit);
}
return allocation.decision(Decision.YES, NAME, "shard count under index limit [%d] and node limit [%d] of total shards per node",
indexShardLimit, clusterShardLimit);
}
@Override
public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
// Only checks the node-level limit, not the index-level
// Capture the limit here in case it changes during this method's
// execution
final int clusterShardLimit = this.clusterShardLimit;
if (clusterShardLimit <= 0) {
return allocation.decision(Decision.YES, NAME, "total shard limit disabled: [cluster: %d] <= 0",
clusterShardLimit);
}
int nodeShardCount = 0;
for (ShardRouting nodeShard : node) {
// don't count relocating shards...
if (nodeShard.relocating()) {
continue;
}
nodeShardCount++;
}
if (clusterShardLimit >= 0 && nodeShardCount >= clusterShardLimit) {
return allocation.decision(Decision.NO, NAME, "too many shards for this node [%d], limit: [%d]",
nodeShardCount, clusterShardLimit);
}
return allocation.decision(Decision.YES, NAME, "shard count under node limit [%d] of total shards per node",
clusterShardLimit);
}
}
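A self-contained sketch of the counting rule this decider applies (hypothetical types, not the Elasticsearch API): relocating shards are excluded from the count, canAllocate rejects at count >= limit, and canRemain rejects only at count > limit so a shard already on the node may stay:

    import java.util.List;

    final class ShardLimitSketch {
        static final class Shard {
            final boolean relocating;
            Shard(boolean relocating) { this.relocating = relocating; }
        }

        static int countShards(List<Shard> shardsOnNode) {
            int count = 0;
            for (Shard shard : shardsOnNode) {
                if (shard.relocating == false) { // don't count relocating shards
                    count++;
                }
            }
            return count;
        }

        static boolean canAllocate(List<Shard> shardsOnNode, int limit) {
            return limit <= 0 || countShards(shardsOnNode) < limit;
        }

        static boolean canRemain(List<Shard> shardsOnNode, int limit) {
            return limit <= 0 || countShards(shardsOnNode) <= limit;
        }
    }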


@ -1,78 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.blobstore.support;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.Streams;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* Temporary compatibility interface.
*
* This class should be removed after S3 and Azure containers migrate to the new model
*/
@Deprecated
public abstract class AbstractLegacyBlobContainer extends AbstractBlobContainer {
protected AbstractLegacyBlobContainer(BlobPath path) {
super(path);
}
/**
* Creates a new {@link InputStream} for the given blob name
* <p>
* This method is deprecated and is used only for compatibility with older blob containers
* The new blob containers should use readBlob/writeBlob methods instead
*/
@Deprecated
protected abstract InputStream openInput(String blobName) throws IOException;
/**
* Creates a new OutputStream for the given blob name
* <p>
* This method is deprecated and is used only for compatibility with older blob containers
* The new blob containers should override readBlob/writeBlob methods instead
*/
@Deprecated
protected abstract OutputStream createOutput(String blobName) throws IOException;
@Override
public InputStream readBlob(String blobName) throws IOException {
return openInput(blobName);
}
@Override
public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
try (OutputStream stream = createOutput(blobName)) {
Streams.copy(inputStream, stream);
}
}
@Override
public void writeBlob(String blobName, BytesReference data) throws IOException {
try (OutputStream stream = createOutput(blobName)) {
data.writeTo(stream);
}
}
}


@ -64,8 +64,9 @@ public final class AllTermQuery extends Query {
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (getBoost() != 1f) {
return super.rewrite(reader);
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
boolean fieldExists = false;
boolean hasPayloads = false;
@ -80,14 +81,10 @@ public final class AllTermQuery extends Query {
}
}
if (fieldExists == false) {
Query rewritten = new MatchNoDocsQuery();
rewritten.setBoost(getBoost());
return rewritten;
return new MatchNoDocsQuery();
}
if (hasPayloads == false) {
TermQuery rewritten = new TermQuery(term);
rewritten.setBoost(getBoost());
return rewritten;
return new TermQuery(term);
}
return this;
}
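The two-line delegation at the top of this rewrite recurs in most of the query classes touched by this commit. A minimal sketch of the pattern, assuming the Lucene 5.x Query API: super.rewrite() peels a non-default boost off into a BoostQuery wrapper, so a custom rewrite must return that result before running its own logic, or the boost would be applied twice or lost.

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.Query;

    final class DelegatingRewriteQuery extends Query {
        @Override
        public Query rewrite(IndexReader reader) throws IOException {
            Query rewritten = super.rewrite(reader);
            if (rewritten != this) {
                return rewritten; // a BoostQuery now wraps this query; stop here
            }
            // ... query-specific rewriting would go here ...
            return this;
        }

        @Override
        public String toString(String field) {
            return "DelegatingRewriteQuery";
        }
    }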


@ -35,10 +35,7 @@ import org.elasticsearch.common.io.FastStringReader;
import java.io.IOException;
import java.io.Reader;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.*;
/**
*
@ -79,29 +76,17 @@ public class MoreLikeThisQuery extends Query {
@Override
public int hashCode() {
int result = boostTerms ? 1 : 0;
result = 31 * result + Float.floatToIntBits(boostTermsFactor);
result = 31 * result + Arrays.hashCode(likeText);
result = 31 * result + maxDocFreq;
result = 31 * result + maxQueryTerms;
result = 31 * result + maxWordLen;
result = 31 * result + minDocFreq;
result = 31 * result + minTermFrequency;
result = 31 * result + minWordLen;
result = 31 * result + Arrays.hashCode(moreLikeFields);
result = 31 * result + minimumShouldMatch.hashCode();
result = 31 * result + (stopWords == null ? 0 : stopWords.hashCode());
result = 31 * result + Float.floatToIntBits(getBoost());
return result;
return Objects.hash(super.hashCode(), boostTerms, boostTermsFactor, Arrays.hashCode(likeText),
maxDocFreq, maxQueryTerms, maxWordLen, minDocFreq, minTermFrequency, minWordLen,
Arrays.hashCode(moreLikeFields), minimumShouldMatch, stopWords);
}
@Override
public boolean equals(Object obj) {
if (obj == null || getClass() != obj.getClass())
if (super.equals(obj) == false) {
return false;
}
MoreLikeThisQuery other = (MoreLikeThisQuery) obj;
if (getBoost() != other.getBoost())
return false;
if (!analyzer.equals(other.analyzer))
return false;
if (boostTerms != other.boostTerms)
@ -141,6 +126,10 @@ public class MoreLikeThisQuery extends Query {
@Override
public Query rewrite(IndexReader reader) throws IOException {
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
XMoreLikeThis mlt = new XMoreLikeThis(reader, similarity == null ? new DefaultSimilarity() : similarity);
mlt.setFieldNames(moreLikeFields);
@ -179,10 +168,7 @@ public class MoreLikeThisQuery extends Query {
mltQuery = Queries.applyMinimumShouldMatch((BooleanQuery) mltQuery, minimumShouldMatch);
bqBuilder.add(mltQuery, BooleanClause.Occur.SHOULD);
}
BooleanQuery bq = bqBuilder.build();
bq.setBoost(getBoost());
return bq;
return bqBuilder.build();
}
private void handleUnlike(XMoreLikeThis mlt, String[] unlikeText, Fields[] unlikeFields) throws IOException {


@ -120,8 +120,9 @@ public class MultiPhrasePrefixQuery extends Query {
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (getBoost() != 1.0F) {
return super.rewrite(reader);
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
if (termArrays.isEmpty()) {
return new MatchNoDocsQuery();
@ -145,7 +146,6 @@ public class MultiPhrasePrefixQuery extends Query {
return Queries.newMatchNoDocsQuery();
}
query.add(terms.toArray(Term.class), position);
query.setBoost(getBoost());
return query.rewrite(reader);
}
@ -233,10 +233,11 @@ public class MultiPhrasePrefixQuery extends Query {
*/
@Override
public boolean equals(Object o) {
if (!(o instanceof MultiPhrasePrefixQuery)) return false;
if (super.equals(o) == false) {
return false;
}
MultiPhrasePrefixQuery other = (MultiPhrasePrefixQuery) o;
return this.getBoost() == other.getBoost()
&& this.slop == other.slop
return this.slop == other.slop
&& termArraysEquals(this.termArrays, other.termArrays)
&& this.positions.equals(other.positions);
}
@ -246,11 +247,10 @@ public class MultiPhrasePrefixQuery extends Query {
*/
@Override
public int hashCode() {
return Float.floatToIntBits(getBoost())
return super.hashCode()
^ slop
^ termArraysHashCode()
^ positions.hashCode()
^ 0x4AC65113;
^ positions.hashCode();
}
// Breakout calculation of the termArrays hashcode


@ -70,7 +70,7 @@ public class Queries {
.build();
}
public static boolean isNegativeQuery(Query q) {
private static boolean isNegativeQuery(Query q) {
if (!(q instanceof BooleanQuery)) {
return false;
}
@ -107,7 +107,7 @@ public class Queries {
return false;
}
public static BooleanQuery applyMinimumShouldMatch(BooleanQuery query, @Nullable String minimumShouldMatch) {
public static Query applyMinimumShouldMatch(BooleanQuery query, @Nullable String minimumShouldMatch) {
if (minimumShouldMatch == null) {
return query;
}
@ -127,11 +127,14 @@ public class Queries {
}
builder.setMinimumNumberShouldMatch(msm);
BooleanQuery bq = builder.build();
bq.setBoost(query.getBoost());
query = bq;
if (query.getBoost() != 1f) {
return new BoostQuery(bq, query.getBoost());
}
return bq;
} else {
return query;
}
}
private static Pattern spaceAroundLessThanPattern = Pattern.compile("(\\s+<\\s*)|(\\s*<\\s+)");
private static Pattern spacePattern = Pattern.compile(" ");
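A sketch of the changed applyMinimumShouldMatch contract under the Lucene 5.x API (simplified: the real minimum_should_match parsing also supports percentages and combinations); the point is that the caller's boost survives the rebuild as a BoostQuery wrapper:

    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.BoostQuery;
    import org.apache.lucene.search.Query;

    final class MsmSketch {
        static Query applyMsm(BooleanQuery query, int msm) {
            BooleanQuery.Builder builder = new BooleanQuery.Builder();
            builder.setDisableCoord(query.isCoordDisabled());
            builder.setMinimumNumberShouldMatch(msm);
            for (BooleanClause clause : query) {
                builder.add(clause); // copy clauses into the rebuilt query
            }
            BooleanQuery rebuilt = builder.build();
            if (query.getBoost() != 1f) {
                return new BoostQuery(rebuilt, query.getBoost()); // keep the old boost
            }
            return rebuilt;
        }
    }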


@ -670,14 +670,14 @@ public final class XMoreLikeThis {
float bestScore = -1;
while ((scoreTerm = q.pop()) != null) {
TermQuery tq = new TermQuery(new Term(scoreTerm.topField, scoreTerm.word));
Query tq = new TermQuery(new Term(scoreTerm.topField, scoreTerm.word));
if (boost) {
if (bestScore == -1) {
bestScore = (scoreTerm.score);
}
float myScore = (scoreTerm.score);
tq.setBoost(boostFactor * myScore / bestScore);
tq = new BoostQuery(tq, boostFactor * myScore / bestScore);
}
try {


@ -123,8 +123,9 @@ public class FiltersFunctionScoreQuery extends Query {
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (getBoost() != 1.0F) {
return super.rewrite(reader);
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
Query newQ = subQuery.rewrite(reader);
if (newQ == subQuery)


@ -71,8 +71,9 @@ public class FunctionScoreQuery extends Query {
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (getBoost() != 1.0F) {
return super.rewrite(reader);
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
Query newQ = subQuery.rewrite(reader);
if (newQ == subQuery) {


@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;


@ -31,7 +31,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.zen.NotMasterException;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;


@ -65,6 +65,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
if (shardState.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard started state", shard);
allocation.setHasPendingAsyncFetch();
unassignedIterator.removeAndIgnore();
continue;
}


@ -139,6 +139,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores = fetchData(shard, allocation);
if (shardStores.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard stores", shard);
allocation.setHasPendingAsyncFetch();
unassignedIterator.removeAndIgnore();
continue; // still fetching
}
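These two one-line additions are what the rebalance guard in ShardsAllocators keys off. A minimal sketch of the coordination, with hypothetical types rather than the real RoutingAllocation:

    // Fetch-in-progress marks a flag on the allocation round, and rebalancing
    // is skipped while it is set (hypothetical minimal types, not ES classes).
    final class AllocationRound {
        private boolean hasPendingAsyncFetch;

        void setHasPendingAsyncFetch() { hasPendingAsyncFetch = true; }
        boolean hasPendingAsyncFetch() { return hasPendingAsyncFetch; }
    }

    final class RebalanceGuardSketch {
        boolean rebalance(AllocationRound round) {
            if (round.hasPendingAsyncFetch()) {
                // shard store data is still being fetched; moving shards now
                // could be undone as soon as the results arrive, so wait
                return false;
            }
            return doRebalance(round);
        }
        private boolean doRebalance(AllocationRound round) { return false; } // placeholder
    }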


@ -28,8 +28,8 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.plain.BytesBinaryDVIndexFieldData;
import org.elasticsearch.index.fielddata.plain.DisabledIndexFieldData;
import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
import org.elasticsearch.index.fielddata.plain.GeoPointBinaryDVIndexFieldData;
import org.elasticsearch.index.fielddata.plain.GeoPointDoubleArrayIndexFieldData;
import org.elasticsearch.index.fielddata.plain.AbstractGeoPointDVIndexFieldData;
import org.elasticsearch.index.fielddata.plain.GeoPointArrayIndexFieldData;
import org.elasticsearch.index.fielddata.plain.IndexIndexFieldData;
import org.elasticsearch.index.fielddata.plain.PagedBytesIndexFieldData;
import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
@ -85,7 +85,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
buildersByTypeBuilder.put("short", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("int", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("long", MISSING_DOC_VALUES_BUILDER);
buildersByTypeBuilder.put("geo_point", new GeoPointDoubleArrayIndexFieldData.Builder());
buildersByTypeBuilder.put("geo_point", new GeoPointArrayIndexFieldData.Builder());
buildersByTypeBuilder.put(ParentFieldMapper.NAME, new ParentChildIndexFieldData.Builder());
buildersByTypeBuilder.put(IndexFieldMapper.NAME, new IndexIndexFieldData.Builder());
buildersByTypeBuilder.put("binary", new DisabledIndexFieldData.Builder());
@ -101,7 +101,7 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
.put("short", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.SHORT))
.put("int", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.INT))
.put("long", new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.LONG))
.put("geo_point", new GeoPointBinaryDVIndexFieldData.Builder())
.put("geo_point", new AbstractGeoPointDVIndexFieldData.Builder())
.put("binary", new BytesBinaryDVIndexFieldData.Builder())
.put(BooleanFieldMapper.CONTENT_TYPE, new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.BOOLEAN))
.immutableMap();
@ -129,8 +129,8 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Clo
.put(Tuple.tuple("long", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder().numericType(IndexNumericFieldData.NumericType.LONG))
.put(Tuple.tuple("long", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("geo_point", ARRAY_FORMAT), new GeoPointDoubleArrayIndexFieldData.Builder())
.put(Tuple.tuple("geo_point", DOC_VALUES_FORMAT), new GeoPointBinaryDVIndexFieldData.Builder())
.put(Tuple.tuple("geo_point", ARRAY_FORMAT), new GeoPointArrayIndexFieldData.Builder())
.put(Tuple.tuple("geo_point", DOC_VALUES_FORMAT), new AbstractGeoPointDVIndexFieldData.Builder())
.put(Tuple.tuple("geo_point", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
.put(Tuple.tuple("binary", DOC_VALUES_FORMAT), new BytesBinaryDVIndexFieldData.Builder())


@ -30,7 +30,7 @@ import java.util.Collections;
/**
*/
abstract class AbstractAtomicGeoPointFieldData implements AtomicGeoPointFieldData {
public abstract class AbstractAtomicGeoPointFieldData implements AtomicGeoPointFieldData {
@Override
public final SortedBinaryDocValues getBytesValues() {


@ -21,11 +21,16 @@ package org.elasticsearch.index.fielddata.plain;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DocValues;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.*;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MappedFieldType.Names;
import org.elasticsearch.index.mapper.MapperService;
@ -34,9 +39,9 @@ import org.elasticsearch.search.MultiValueMode;
import java.io.IOException;
public class GeoPointBinaryDVIndexFieldData extends DocValuesIndexFieldData implements IndexGeoPointFieldData {
public abstract class AbstractGeoPointDVIndexFieldData extends DocValuesIndexFieldData implements IndexGeoPointFieldData {
public GeoPointBinaryDVIndexFieldData(Index index, Names fieldNames, FieldDataType fieldDataType) {
AbstractGeoPointDVIndexFieldData(Index index, Names fieldNames, FieldDataType fieldDataType) {
super(index, fieldNames, fieldDataType);
}
@ -45,10 +50,24 @@ public class GeoPointBinaryDVIndexFieldData extends DocValuesIndexFieldData impl
throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance");
}
/**
* Lucene 5.4 GeoPointFieldType
*/
public static class GeoPointDVIndexFieldData extends AbstractGeoPointDVIndexFieldData {
final boolean indexCreatedBefore2x;
public GeoPointDVIndexFieldData(Index index, Names fieldNames, FieldDataType fieldDataType, final boolean indexCreatedBefore2x) {
super(index, fieldNames, fieldDataType);
this.indexCreatedBefore2x = indexCreatedBefore2x;
}
@Override
public AtomicGeoPointFieldData load(LeafReaderContext context) {
try {
return new GeoPointBinaryDVAtomicFieldData(DocValues.getBinary(context.reader(), fieldNames.indexName()));
if (indexCreatedBefore2x) {
return new GeoPointLegacyDVAtomicFieldData(DocValues.getBinary(context.reader(), fieldNames.indexName()));
}
return new GeoPointDVAtomicFieldData(DocValues.getSortedNumeric(context.reader(), fieldNames.indexName()));
} catch (IOException e) {
throw new IllegalStateException("Cannot load doc values", e);
}
@ -58,16 +77,16 @@ public class GeoPointBinaryDVIndexFieldData extends DocValuesIndexFieldData impl
public AtomicGeoPointFieldData loadDirect(LeafReaderContext context) throws Exception {
return load(context);
}
}
public static class Builder implements IndexFieldData.Builder {
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
// Ignore breaker
final Names fieldNames = fieldType.names();
return new GeoPointBinaryDVIndexFieldData(indexSettings.getIndex(), fieldNames, fieldType.fieldDataType());
}
return new GeoPointDVIndexFieldData(indexSettings.getIndex(), fieldType.names(), fieldType.fieldDataType(),
// norelease cut over to .before(Version.V_2_2_0) once GeoPointFieldV2 is completely merged
indexSettings.getIndexVersionCreated().onOrBefore(Version.CURRENT));
}
}
}


@ -22,6 +22,7 @@ package org.elasticsearch.index.fielddata.plain;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.index.IndexSettings;
@ -33,15 +34,34 @@ import org.elasticsearch.search.MultiValueMode;
import java.io.IOException;
abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData<AtomicGeoPointFieldData> implements IndexGeoPointFieldData {
protected abstract static class BaseGeoPointTermsEnum {
protected final BytesRefIterator termsEnum;
protected static class GeoPointEnum {
protected BaseGeoPointTermsEnum(BytesRefIterator termsEnum) {
this.termsEnum = termsEnum;
}
}
private final BytesRefIterator termsEnum;
protected static class GeoPointTermsEnum extends BaseGeoPointTermsEnum {
protected GeoPointTermsEnum(BytesRefIterator termsEnum) {
super(termsEnum);
}
public Long next() throws IOException {
final BytesRef term = termsEnum.next();
if (term == null) {
return null;
}
return NumericUtils.prefixCodedToLong(term);
}
}
protected static class GeoPointTermsEnumLegacy extends BaseGeoPointTermsEnum {
private final GeoPoint next;
private final CharsRefBuilder spare;
protected GeoPointEnum(BytesRefIterator termsEnum) {
this.termsEnum = termsEnum;
protected GeoPointTermsEnumLegacy(BytesRefIterator termsEnum) {
super(termsEnum);
next = new GeoPoint();
spare = new CharsRefBuilder();
}
@ -67,7 +87,6 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData<Ato
final double lon = Double.parseDouble(new String(spare.chars(), commaIndex + 1, spare.length() - (commaIndex + 1)));
return next.reset(lat, lon);
}
}
public AbstractIndexGeoPointFieldData(IndexSettings indexSettings, Names fieldNames, FieldDataType fieldDataType, IndexFieldDataCache cache) {
@ -83,5 +102,4 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData<Ato
protected AtomicGeoPointFieldData empty(int maxDoc) {
return AbstractAtomicGeoPointFieldData.empty(maxDoc);
}
}


@ -0,0 +1,148 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata.plain;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.GeoUtils;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.GeoPointValues;
import org.elasticsearch.index.fielddata.MultiGeoPointValues;
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
/**
*
*/
public abstract class GeoPointArrayAtomicFieldData extends AbstractAtomicGeoPointFieldData {
@Override
public void close() {
}
static class WithOrdinals extends GeoPointArrayAtomicFieldData {
private final LongArray indexedPoints;
private final Ordinals ordinals;
private final int maxDoc;
public WithOrdinals(LongArray indexedPoints, Ordinals ordinals, int maxDoc) {
super();
this.indexedPoints = indexedPoints;
this.ordinals = ordinals;
this.maxDoc = maxDoc;
}
@Override
public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_INT + indexedPoints.ramBytesUsed();
}
@Override
public Collection<Accountable> getChildResources() {
List<Accountable> resources = new ArrayList<>();
resources.add(Accountables.namedAccountable("indexedPoints", indexedPoints));
return Collections.unmodifiableList(resources);
}
@Override
public MultiGeoPointValues getGeoPointValues() {
final RandomAccessOrds ords = ordinals.ordinals();
final SortedDocValues singleOrds = DocValues.unwrapSingleton(ords);
final GeoPoint point = new GeoPoint();
if (singleOrds != null) {
final GeoPointValues values = new GeoPointValues() {
@Override
public GeoPoint get(int docID) {
final int ord = singleOrds.getOrd(docID);
if (ord >= 0) {
return point.resetFromIndexHash(indexedPoints.get(ord));
}
// todo: same issue as in ParentChildIndexFieldData, handle issue upstream?
return null;
}
};
return FieldData.singleton(values, DocValues.docsWithValue(singleOrds, maxDoc));
}
return new MultiGeoPointValues() {
@Override
public GeoPoint valueAt(int index) {
return point.resetFromIndexHash(indexedPoints.get(ords.ordAt(index)));
}
@Override
public void setDocument(int docId) {
ords.setDocument(docId);
}
@Override
public int count() {
return ords.cardinality();
}
};
}
}
public static class Single extends GeoPointArrayAtomicFieldData {
private final LongArray indexedPoint;
private final BitSet set;
public Single(LongArray indexedPoint, BitSet set) {
this.indexedPoint = indexedPoint;
this.set = set;
}
@Override
public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_INT + indexedPoint.ramBytesUsed()
+ (set == null ? 0 : set.ramBytesUsed());
}
@Override
public Collection<Accountable> getChildResources() {
List<Accountable> resources = new ArrayList<>();
resources.add(Accountables.namedAccountable("indexedPoints", indexedPoint));
if (set != null) {
resources.add(Accountables.namedAccountable("missing bitset", set));
}
return Collections.unmodifiableList(resources);
}
@Override
public MultiGeoPointValues getGeoPointValues() {
final GeoPoint point = new GeoPoint();
final GeoPointValues values = new GeoPointValues() {
@Override
public GeoPoint get(int docID) {
return point.resetFromIndexHash(indexedPoint.get(docID));
}
};
return FieldData.singleton(values, set);
}
}
}
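Each point above is stored as a single long and restored with GeoPoint.resetFromIndexHash, i.e. Lucene's morton-encoded hash. The exact bit layout belongs to Lucene; this sketch only illustrates the general idea of packing a coordinate pair into one long, using a hypothetical float-bits layout:

    // Illustration only: not Lucene's actual morton encoding.
    final class PackedPointSketch {
        static long pack(float lat, float lon) {
            return ((long) Float.floatToIntBits(lat) << 32)
                    | (Float.floatToIntBits(lon) & 0xFFFFFFFFL);
        }
        static float lat(long packed) {
            return Float.intBitsToFloat((int) (packed >>> 32));
        }
        static float lon(long packed) {
            return Float.intBitsToFloat((int) packed);
        }
    }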


@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata.plain;
import org.apache.lucene.index.LeafReader;
@ -23,12 +24,18 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.Terms;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.Version;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.DoubleArray;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.*;
import org.elasticsearch.index.fielddata.AtomicGeoPointFieldData;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.ordinals.Ordinals;
import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
import org.elasticsearch.index.mapper.MappedFieldType;
@ -36,24 +43,28 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
/**
* Loads FieldData for an array of GeoPoints supporting both long encoded points and backward compatible double arrays
*/
public class GeoPointDoubleArrayIndexFieldData extends AbstractIndexGeoPointFieldData {
public class GeoPointArrayIndexFieldData extends AbstractIndexGeoPointFieldData {
private final CircuitBreakerService breakerService;
private final boolean indexCreatedBefore22;
public static class Builder implements IndexFieldData.Builder {
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
return new GeoPointDoubleArrayIndexFieldData(indexSettings, fieldType.names(), fieldType.fieldDataType(), cache, breakerService);
return new GeoPointArrayIndexFieldData(indexSettings, fieldType.names(), fieldType.fieldDataType(), cache,
// norelease change to .before(Version.V_2_2_0) once GeoPointFieldV2 is completely merged
breakerService, indexSettings.getIndexVersionCreated().onOrBefore(Version.CURRENT));
}
}
public GeoPointDoubleArrayIndexFieldData(IndexSettings indexSettings, MappedFieldType.Names fieldNames,
FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService) {
public GeoPointArrayIndexFieldData(IndexSettings indexSettings, MappedFieldType.Names fieldNames,
FieldDataType fieldDataType, IndexFieldDataCache cache, CircuitBreakerService breakerService,
final boolean indexCreatedBefore22) {
super(indexSettings, fieldNames, fieldDataType, cache);
this.breakerService = breakerService;
this.indexCreatedBefore22 = indexCreatedBefore22;
}
@Override
@ -69,12 +80,66 @@ public class GeoPointDoubleArrayIndexFieldData extends AbstractIndexGeoPointFiel
estimator.afterLoad(null, data.ramBytesUsed());
return data;
}
return (indexCreatedBefore22 == true) ? loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
}
/**
* long encoded geopoint field data
*/
private AtomicGeoPointFieldData loadFieldData22(LeafReader reader, NonEstimatingEstimator estimator, Terms terms,
AtomicGeoPointFieldData data) throws Exception {
LongArray indexedPoints = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(128);
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio",
OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
boolean success = false;
try (OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio)) {
final GeoPointTermsEnum iter = new GeoPointTermsEnum(builder.buildFromTerms(OrdinalsBuilder.wrapNumeric64Bit(terms.iterator())));
Long hashedPoint;
long numTerms = 0;
while ((hashedPoint = iter.next()) != null) {
indexedPoints = BigArrays.NON_RECYCLING_INSTANCE.resize(indexedPoints, numTerms + 1);
indexedPoints.set(numTerms++, hashedPoint);
}
indexedPoints = BigArrays.NON_RECYCLING_INSTANCE.resize(indexedPoints, numTerms);
Ordinals build = builder.build(fieldDataType.getSettings());
RandomAccessOrds ordinals = build.ordinals();
if (!(FieldData.isMultiValued(ordinals) || CommonSettings.getMemoryStorageHint(fieldDataType) == CommonSettings
.MemoryStorageFormat.ORDINALS)) {
int maxDoc = reader.maxDoc();
LongArray sIndexedPoint = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(reader.maxDoc());
for (int i=0; i<maxDoc; ++i) {
ordinals.setDocument(i);
long nativeOrdinal = ordinals.nextOrd();
if (nativeOrdinal != RandomAccessOrds.NO_MORE_ORDS) {
sIndexedPoint.set(i, indexedPoints.get(nativeOrdinal));
}
}
BitSet set = builder.buildDocsWithValuesSet();
data = new GeoPointArrayAtomicFieldData.Single(sIndexedPoint, set);
} else {
data = new GeoPointArrayAtomicFieldData.WithOrdinals(indexedPoints, build, reader.maxDoc());
}
success = true;
return data;
} finally {
if (success) {
estimator.afterLoad(null, data.ramBytesUsed());
}
}
}
/**
* Backward compatibility support for legacy lat/lon double arrays
*/
private AtomicGeoPointFieldData loadLegacyFieldData(LeafReader reader, NonEstimatingEstimator estimator, Terms terms,
AtomicGeoPointFieldData data) throws Exception {
DoubleArray lat = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(128);
DoubleArray lon = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(128);
final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat("acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
boolean success = false;
try (OrdinalsBuilder builder = new OrdinalsBuilder(terms.size(), reader.maxDoc(), acceptableTransientOverheadRatio)) {
final GeoPointEnum iter = new GeoPointEnum(builder.buildFromTerms(terms.iterator()));
final GeoPointTermsEnumLegacy iter = new GeoPointTermsEnumLegacy(builder.buildFromTerms(terms.iterator()));
GeoPoint point;
long numTerms = 0;
while ((point = iter.next()) != null) {
@ -102,9 +167,9 @@ public class GeoPointDoubleArrayIndexFieldData extends AbstractIndexGeoPointFiel
}
}
BitSet set = builder.buildDocsWithValuesSet();
data = new GeoPointDoubleArrayAtomicFieldData.Single(sLon, sLat, set);
data = new GeoPointArrayLegacyAtomicFieldData.Single(sLon, sLat, set);
} else {
data = new GeoPointDoubleArrayAtomicFieldData.WithOrdinals(lon, lat, build, reader.maxDoc());
data = new GeoPointArrayLegacyAtomicFieldData.WithOrdinals(lon, lat, build, reader.maxDoc());
}
success = true;
return data;
@ -112,8 +177,6 @@ public class GeoPointDoubleArrayIndexFieldData extends AbstractIndexGeoPointFiel
if (success) {
estimator.afterLoad(null, data.ramBytesUsed());
}
}
}
}
}


@ -39,13 +39,13 @@ import java.util.List;
/**
*/
public abstract class GeoPointDoubleArrayAtomicFieldData extends AbstractAtomicGeoPointFieldData {
public abstract class GeoPointArrayLegacyAtomicFieldData extends AbstractAtomicGeoPointFieldData {
@Override
public void close() {
}
static class WithOrdinals extends GeoPointDoubleArrayAtomicFieldData {
static class WithOrdinals extends GeoPointArrayLegacyAtomicFieldData {
private final DoubleArray lon, lat;
private final Ordinals ordinals;
@ -117,7 +117,7 @@ public abstract class GeoPointDoubleArrayAtomicFieldData extends AbstractAtomicG
/**
* Assumes unset values are marked in bitset, and docId is used as the index to the value array.
*/
public static class Single extends GeoPointDoubleArrayAtomicFieldData {
public static class Single extends GeoPointArrayLegacyAtomicFieldData {
private final DoubleArray lon, lat;
private final BitSet set;
@ -130,7 +130,7 @@ public abstract class GeoPointDoubleArrayAtomicFieldData extends AbstractAtomicG
@Override
public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_INT/*size*/ + lon.ramBytesUsed() + lat.ramBytesUsed() + (set == null ? 0 : set.ramBytesUsed());
return RamUsageEstimator.NUM_BYTES_INT + lon.ramBytesUsed() + lat.ramBytesUsed() + (set == null ? 0 : set.ramBytesUsed());
}
@Override


@ -0,0 +1,90 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata.plain;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.index.fielddata.MultiGeoPointValues;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
final class GeoPointDVAtomicFieldData extends AbstractAtomicGeoPointFieldData {
private final SortedNumericDocValues values;
GeoPointDVAtomicFieldData(SortedNumericDocValues values) {
super();
this.values = values;
}
@Override
public long ramBytesUsed() {
return 0; // not exposed by Lucene
}
@Override
public Collection<Accountable> getChildResources() {
return Collections.emptyList();
}
@Override
public void close() {
// no-op
}
@Override
public MultiGeoPointValues getGeoPointValues() {
return new MultiGeoPointValues() {
GeoPoint[] points = new GeoPoint[0];
private int count = 0;
@Override
public void setDocument(int docId) {
values.setDocument(docId);
count = values.count();
if (count > points.length) {
final int previousLength = points.length;
points = Arrays.copyOf(points, ArrayUtil.oversize(count, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
for (int i = previousLength; i < points.length; ++i) {
points[i] = new GeoPoint();
}
}
for (int i=0; i<count; ++i) {
points[i].resetFromIndexHash(values.valueAt(i));
}
}
@Override
public int count() {
return count;
}
@Override
public GeoPoint valueAt(int index) {
return points[index];
}
};
}
}


@ -32,14 +32,14 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
final class GeoPointBinaryDVAtomicFieldData extends AbstractAtomicGeoPointFieldData {
final class GeoPointLegacyDVAtomicFieldData extends AbstractAtomicGeoPointFieldData {
private static final int COORDINATE_SIZE = 8; // number of bytes per coordinate
private static final int GEOPOINT_SIZE = COORDINATE_SIZE * 2; // lat + lon
private final BinaryDocValues values;
GeoPointBinaryDVAtomicFieldData(BinaryDocValues values) {
GeoPointLegacyDVAtomicFieldData(BinaryDocValues values) {
super();
this.values = values;
}
@ -97,5 +97,4 @@ final class GeoPointBinaryDVAtomicFieldData extends AbstractAtomicGeoPointFieldD
};
}
}


@ -219,8 +219,9 @@ public class DateFieldMapper extends NumberFieldMapper {
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (getBoost() != 1.0F) {
return super.rewrite(reader);
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
return innerRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, timeZone, forcedDateParser);
}
@ -229,11 +230,9 @@ public class DateFieldMapper extends NumberFieldMapper {
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (!super.equals(o)) return false;
LateParsingQuery that = (LateParsingQuery) o;
if (includeLower != that.includeLower) return false;
if (includeUpper != that.includeUpper) return false;
if (lowerTerm != null ? !lowerTerm.equals(that.lowerTerm) : that.lowerTerm != null) return false;
@ -245,13 +244,7 @@ public class DateFieldMapper extends NumberFieldMapper {
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (lowerTerm != null ? lowerTerm.hashCode() : 0);
result = 31 * result + (upperTerm != null ? upperTerm.hashCode() : 0);
result = 31 * result + (includeLower ? 1 : 0);
result = 31 * result + (includeUpper ? 1 : 0);
result = 31 * result + (timeZone != null ? timeZone.hashCode() : 0);
return result;
return Objects.hash(super.hashCode(), lowerTerm, upperTerm, includeLower, includeUpper, timeZone);
}
@Override


@ -19,7 +19,10 @@
package org.elasticsearch.index.query;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.SpanBoostQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.ParseField;
@ -74,7 +77,13 @@ public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder> exte
public final Query toQuery(QueryShardContext context) throws IOException {
Query query = doToQuery(context);
if (query != null) {
setFinalBoost(query);
if (boost != DEFAULT_BOOST) {
if (query instanceof SpanQuery) {
query = new SpanBoostQuery((SpanQuery) query, boost);
} else {
query = new BoostQuery(query, boost);
}
}
if (queryName != null) {
context.addNamedQuery(queryName, query);
}
@ -82,20 +91,6 @@ public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder> exte
return query;
}
/**
* Sets the main boost to the query obtained by converting the current query into a lucene query.
* The default behaviour is to set the main boost, after verifying that we are not overriding any non default boost
* value that was previously set to the lucene query. That case would require some manual decision on how to combine
* the main boost with the boost coming from lucene by overriding this method.
* @throws IllegalStateException if the lucene query boost has already been set
*/
protected void setFinalBoost(Query query) {
if (query.getBoost() != AbstractQueryBuilder.DEFAULT_BOOST) {
throw new IllegalStateException("lucene query boost is already set, override setFinalBoost to define how to combine lucene boost with main boost");
}
query.setBoost(boost);
}
@Override
public final Query toFilter(QueryShardContext context) throws IOException {
Query result = null;
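This hunk is the heart of the commit: with Lucene 5.x treating queries as immutable, a boost is expressed structurally instead of via setBoost. A minimal before/after sketch:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BoostQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    final class BoostMigrationSketch {
        static Query boosted() {
            Query query = new TermQuery(new Term("field", "value"));
            // old style (removed in this commit): query.setBoost(2f); return query;
            return new BoostQuery(query, 2f); // new style: immutable wrapper
        }
    }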


@ -279,8 +279,8 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
} else {
minimumShouldMatch = this.minimumShouldMatch;
}
booleanQuery = Queries.applyMinimumShouldMatch(booleanQuery, minimumShouldMatch);
return adjustPureNegative ? fixNegativeQueryIfNeeded(booleanQuery) : booleanQuery;
Query query = Queries.applyMinimumShouldMatch(booleanQuery, minimumShouldMatch);
return adjustPureNegative ? fixNegativeQueryIfNeeded(query) : query;
}
private static void addBooleanClauses(QueryShardContext context, BooleanQuery.Builder booleanQueryBuilder, List<QueryBuilder> clauses, Occur occurs) throws IOException {


@ -115,8 +115,6 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
private GeoShapeQueryBuilder(String fieldName, ShapeBuilder shape, String indexedShapeId, String indexedShapeType) throws IOException {
this(fieldName, new BytesArray(new byte[1]), indexedShapeId, indexedShapeType);
if (shape != null) {
XContentBuilder builder = XContentFactory.jsonBuilder();
shape.toXContent(builder, EMPTY_PARAMS);
this.shapeBytes = shape.buildAsBytes(XContentType.JSON);
if (this.shapeBytes.length() == 0) {
throw new IllegalArgumentException("shape must not be empty");


@ -79,7 +79,7 @@ public class GeoShapeQueryParser implements QueryParser<GeoShapeQueryBuilder> {
currentFieldName = parser.currentName();
token = parser.nextToken();
if (parseContext.parseFieldMatcher().match(currentFieldName, SHAPE_FIELD)) {
XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
XContentBuilder builder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
shape = builder.bytes();
} else if (parseContext.parseFieldMatcher().match(currentFieldName, STRATEGY_FIELD)) {
String strategyName = parser.text();


@ -215,8 +215,6 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
if (innerQuery == null) {
return null;
}
innerQuery.setBoost(boost);
DocumentMapper childDocMapper = context.getMapperService().documentMapper(type);
if (childDocMapper == null) {
throw new QueryShardException(context, "[" + NAME + "] no mapping found for type [" + type + "]");
@ -286,8 +284,9 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (getBoost() != 1.0F) {
return super.rewrite(reader);
Query rewritten = super.rewrite(reader);
if (rewritten != this) {
return rewritten;
}
if (reader instanceof DirectoryReader) {
String joinField = ParentFieldMapper.joinField(parentType);
@ -310,8 +309,6 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (!super.equals(o)) return false;
LateParsingQuery that = (LateParsingQuery) o;
@ -326,14 +323,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + toQuery.hashCode();
result = 31 * result + innerQuery.hashCode();
result = 31 * result + minChildren;
result = 31 * result + maxChildren;
result = 31 * result + parentType.hashCode();
result = 31 * result + scoreMode.hashCode();
return result;
return Objects.hash(super.hashCode(), toQuery, innerQuery, minChildren, maxChildren, parentType, scoreMode);
}
@Override


@ -129,7 +129,6 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
if (innerQuery == null) {
return null;
}
innerQuery.setBoost(boost);
DocumentMapper parentDocMapper = context.getMapperService().documentMapper(type);
if (parentDocMapper == null) {
throw new QueryShardException(context, "[has_parent] query configured 'parent_type' [" + type


@ -115,14 +115,6 @@ public class IndicesQueryBuilder extends AbstractQueryBuilder<IndicesQueryBuilde
return noMatchQuery.toQuery(context);
}
@Override
protected void setFinalBoost(Query query) {
if (boost != DEFAULT_BOOST) {
//if both the wrapped query and the wrapper hold a boost, the main one coming from the wrapper wins
query.setBoost(boost);
}
}
@Override
protected IndicesQueryBuilder doReadFrom(StreamInput in) throws IOException {
IndicesQueryBuilder indicesQueryBuilder = new IndicesQueryBuilder(in.readQuery(), in.readStringArray());


@ -301,8 +301,7 @@ public class MatchQueryBuilder extends AbstractQueryBuilder<MatchQueryBuilder> {
}
/**
* Get the setting for handling zero terms queries.
* @see #zeroTermsQuery(ZeroTermsQuery)
* Returns the setting for handling zero terms queries.
*/
public MatchQuery.ZeroTermsQuery zeroTermsQuery() {
return this.zeroTermsQuery;


@ -548,12 +548,6 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
return query;
}
@Override
protected void setFinalBoost(Query query) {
// we need to preserve the boost that came out of the parsing phase
query.setBoost(boost * query.getBoost());
}
private static Map<String, Float> handleFieldsMatchPattern(MapperService mapperService, Map<String, Float> fieldsBoosts) {
Map<String, Float> newFieldsBoosts = new TreeMap<>();
for (Map.Entry<String, Float> fieldBoost : fieldsBoosts.entrySet()) {


@ -22,6 +22,7 @@ package org.elasticsearch.index.query;
import org.apache.lucene.queryparser.classic.MapperQueryParser;
import org.apache.lucene.queryparser.classic.QueryParserSettings;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.automaton.Operations;
@ -36,10 +37,7 @@ import org.elasticsearch.index.query.support.QueryParsers;
import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.TreeMap;
import java.util.*;
/**
* A query that parses a query string and runs it. There are two modes that this operates. The first,
@ -722,16 +720,25 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
if (query == null) {
return null;
}
//save the BoostQuery wrapped structure if present
List<Float> boosts = new ArrayList<>();
while(query instanceof BoostQuery) {
BoostQuery boostQuery = (BoostQuery) query;
boosts.add(boostQuery.getBoost());
query = boostQuery.getQuery();
}
query = Queries.fixNegativeQueryIfNeeded(query);
if (query instanceof BooleanQuery) {
query = Queries.applyMinimumShouldMatch((BooleanQuery) query, this.minimumShouldMatch());
}
return query;
//restore the previous BoostQuery wrapping
for (int i = boosts.size() - 1; i >= 0; i--) {
query = new BoostQuery(query, boosts.get(i));
}
@Override
protected void setFinalBoost(Query query) {
//we need to preserve the boost that came out of the parsing phase
query.setBoost(query.getBoost() * boost);
return query;
}
}
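The save/restore loop above generalizes to any transformation that must not disturb nested boosts. A standalone sketch of the idiom (Lucene 5.x API, hypothetical helper name):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.UnaryOperator;
    import org.apache.lucene.search.BoostQuery;
    import org.apache.lucene.search.Query;

    final class BoostPreservingTransform {
        static Query apply(Query query, UnaryOperator<Query> transform) {
            List<Float> boosts = new ArrayList<>();
            while (query instanceof BoostQuery) {
                BoostQuery bq = (BoostQuery) query;
                boosts.add(bq.getBoost());   // remember each wrapper's boost
                query = bq.getQuery();       // unwrap to the inner query
            }
            query = transform.apply(query);  // rewrite the naked query
            for (int i = boosts.size() - 1; i >= 0; i--) {
                query = new BoostQuery(query, boosts.get(i)); // restore wrappers
            }
            return query;
        }
    }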


@ -104,10 +104,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
@Override
public int hashCode() {
final int prime = 31;
int result = super.hashCode();
result = prime * result + Objects.hashCode(script);
return result;
return Objects.hash(super.hashCode(), script);
}
@Override


@ -63,8 +63,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
try {
Query q = createBooleanQuery(entry.getKey(), text, super.getDefaultOperator());
if (q != null) {
q.setBoost(entry.getValue());
bq.add(q, BooleanClause.Occur.SHOULD);
bq.add(wrapWithBoost(q, entry.getValue()), BooleanClause.Occur.SHOULD);
}
} catch (RuntimeException e) {
rethrowUnlessLenient(e);
@ -86,9 +85,8 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
bq.setDisableCoord(true);
for (Map.Entry<String,Float> entry : weights.entrySet()) {
try {
Query q = new FuzzyQuery(new Term(entry.getKey(), text), fuzziness);
q.setBoost(entry.getValue());
bq.add(q, BooleanClause.Occur.SHOULD);
Query query = new FuzzyQuery(new Term(entry.getKey(), text), fuzziness);
bq.add(wrapWithBoost(query, entry.getValue()), BooleanClause.Occur.SHOULD);
} catch (RuntimeException e) {
rethrowUnlessLenient(e);
}
@ -104,8 +102,7 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
try {
Query q = createPhraseQuery(entry.getKey(), text, slop);
if (q != null) {
q.setBoost(entry.getValue());
bq.add(q, BooleanClause.Occur.SHOULD);
bq.add(wrapWithBoost(q, entry.getValue()), BooleanClause.Occur.SHOULD);
}
} catch (RuntimeException e) {
rethrowUnlessLenient(e);
@ -129,12 +126,12 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
try {
if (settings.analyzeWildcard()) {
Query analyzedQuery = newPossiblyAnalyzedQuery(entry.getKey(), text);
analyzedQuery.setBoost(entry.getValue());
bq.add(analyzedQuery, BooleanClause.Occur.SHOULD);
if (analyzedQuery != null) {
bq.add(wrapWithBoost(analyzedQuery, entry.getValue()), BooleanClause.Occur.SHOULD);
}
} else {
PrefixQuery prefix = new PrefixQuery(new Term(entry.getKey(), text));
prefix.setBoost(entry.getValue());
bq.add(prefix, BooleanClause.Occur.SHOULD);
Query query = new PrefixQuery(new Term(entry.getKey(), text));
bq.add(wrapWithBoost(query, entry.getValue()), BooleanClause.Occur.SHOULD);
}
} catch (RuntimeException e) {
return rethrowUnlessLenient(e);
@ -143,6 +140,13 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
return super.simplify(bq.build());
}
private static Query wrapWithBoost(Query query, float boost) {
if (boost != AbstractQueryBuilder.DEFAULT_BOOST) {
return new BoostQuery(query, boost);
}
return query;
}
/**
* Analyze the given string using its analyzer, constructing either a
* {@code PrefixQuery} or a {@code BooleanQuery} made up


@ -299,11 +299,6 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
return fieldName;
}
@Override
protected void setFinalBoost(Query query) {
query.setBoost(boost * query.getBoost());
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);


@ -18,9 +18,12 @@
*/
package org.elasticsearch.index.query;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.SpanBoostQuery;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanQuery;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -62,11 +65,23 @@ public class SpanMultiTermQueryBuilder extends AbstractQueryBuilder<SpanMultiTer
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
Query subQuery = multiTermQueryBuilder.toQuery(context);
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
if (subQuery instanceof BoostQuery) {
BoostQuery boostQuery = (BoostQuery) subQuery;
subQuery = boostQuery.getQuery();
boost = boostQuery.getBoost();
}
//no MultiTermQuery extends SpanQuery, so SpanBoostQuery is not supported here
assert subQuery instanceof SpanBoostQuery == false;
if (subQuery instanceof MultiTermQuery == false) {
throw new UnsupportedOperationException("unsupported inner query, should be " + MultiTermQuery.class.getName() +" but was "
+ subQuery.getClass().getName());
}
return new SpanMultiTermQueryWrapper<>((MultiTermQuery) subQuery);
SpanQuery wrapper = new SpanMultiTermQueryWrapper<>((MultiTermQuery) subQuery);
if (boost != AbstractQueryBuilder.DEFAULT_BOOST) {
wrapper = new SpanBoostQuery(wrapper, boost);
}
return wrapper;
}
@Override

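Because BoostQuery is not a SpanQuery, the builder above first peels the boost off the inner query, wraps the raw MultiTermQuery, and only then re-applies the boost as a SpanBoostQuery. A hedged standalone sketch of that transfer, using a PrefixQuery as the inner MultiTermQuery:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.SpanBoostQuery;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanQuery;

class SpanBoostTransferSketch {
    static SpanQuery spanify(Query inner) {
        float boost = 1.0f;
        if (inner instanceof BoostQuery) {
            BoostQuery bq = (BoostQuery) inner;
            inner = bq.getQuery();   // unwrap: the span wrapper needs the raw MultiTermQuery
            boost = bq.getBoost();
        }
        SpanQuery span = new SpanMultiTermQueryWrapper<>((MultiTermQuery) inner);
        return boost != 1.0f ? new SpanBoostQuery(span, boost) : span; // re-apply on the outside
    }

    public static void main(String[] args) {
        Query boosted = new BoostQuery(new PrefixQuery(new Term("body", "elast")), 2.0f);
        System.out.println(spanify(boosted));
    }
}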

@ -110,11 +110,6 @@ public class TemplateQueryBuilder extends AbstractQueryBuilder<TemplateQueryBuil
}
}
@Override
protected void setFinalBoost(Query query) {
//no-op this query doesn't support boost
}
@Override
protected TemplateQueryBuilder doReadFrom(StreamInput in) throws IOException {
TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(Template.readTemplate(in));


@ -115,11 +115,6 @@ public class WrapperQueryBuilder extends AbstractQueryBuilder<WrapperQueryBuilde
}
}
@Override
protected void setFinalBoost(Query query) {
//no-op this query doesn't support boost
}
@Override
protected WrapperQueryBuilder doReadFrom(StreamInput in) throws IOException {
return new WrapperQueryBuilder(in.readByteArray());


@ -22,14 +22,12 @@ package org.elasticsearch.index.search;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.*;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
@ -56,8 +54,8 @@ public class MultiMatchQuery extends MatchQuery {
if (query instanceof BooleanQuery) {
query = Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
}
if (boostValue != null && query != null) {
query.setBoost(boostValue);
if (query != null && boostValue != null && boostValue != AbstractQueryBuilder.DEFAULT_BOOST) {
query = new BoostQuery(query, boostValue);
}
return query;
}
@ -167,13 +165,13 @@ public class MultiMatchQuery extends MatchQuery {
Analyzer actualAnalyzer = getAnalyzer(fieldType);
name = fieldType.names().indexName();
if (!groups.containsKey(actualAnalyzer)) {
groups.put(actualAnalyzer, new ArrayList<FieldAndFieldType>());
groups.put(actualAnalyzer, new ArrayList<>());
}
Float boost = entry.getValue();
boost = boost == null ? Float.valueOf(1.0f) : boost;
groups.get(actualAnalyzer).add(new FieldAndFieldType(name, fieldType, boost));
} else {
missing.add(new Tuple(name, entry.getValue()));
missing.add(new Tuple<>(name, entry.getValue()));
}
}


@ -30,6 +30,7 @@ import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
import org.elasticsearch.index.fielddata.MultiGeoPointValues;
import java.io.IOException;
import java.util.Objects;
/**
*
@ -94,11 +95,7 @@ public class InMemoryGeoBoundingBoxQuery extends Query {
@Override
public int hashCode() {
int h = super.hashCode();
h = 31 * h + fieldName().hashCode();
h = 31 * h + topLeft.hashCode();
h = 31 * h + bottomRight.hashCode();
return h;
return Objects.hash(super.hashCode(), fieldName(), topLeft, bottomRight);
}
private static class Meridian180GeoBoundingBoxBits implements Bits {

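The hashCode rewrite above replaces the hand-rolled 31-based accumulation with Objects.hash, which applies the same recurrence internally through Arrays.hashCode. A small illustration of that equivalence, with made-up component values:

import java.util.Objects;

class HashCombineSketch {
    public static void main(String[] args) {
        Object field = "location", topLeft = 40, bottomRight = 2;
        // Manual accumulation: result = 31 * result + element, seeded with 1.
        int manual = 31 * (31 * (31 * 1 + field.hashCode()) + topLeft.hashCode()) + bottomRight.hashCode();
        // Objects.hash delegates to Arrays.hashCode, which uses the same recurrence.
        System.out.println(manual == Objects.hash(field, topLeft, bottomRight)); // true
    }
}

Note that the two forms in the diff do not produce identical values: the old code seeded the accumulator with super.hashCode(), while Objects.hash seeds with 1 and folds super.hashCode() in as its first element. Only the equals/hashCode contract matters here, not the exact value.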

@ -1,295 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.search.nested;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.join.BitSetProducer;
import org.apache.lucene.util.BitSet;
import java.io.IOException;
import java.util.Collection;
import java.util.Set;
/**
* A special query that accepts a top level parent matching query, and returns the nested docs of the matching parent
* doc as well. This is handy when deleting by query; don't use it for other purposes.
*
* @elasticsearch.internal
*/
public class IncludeNestedDocsQuery extends Query {
private final BitSetProducer parentFilter;
private final Query parentQuery;
// If we are rewritten, this is the original childQuery we
// were passed; we use this for .equals() and
// .hashCode(). This makes rewritten query equal the
// original, so that user does not have to .rewrite() their
// query before searching:
private final Query origParentQuery;
public IncludeNestedDocsQuery(Query parentQuery, BitSetProducer parentFilter) {
this.origParentQuery = parentQuery;
this.parentQuery = parentQuery;
this.parentFilter = parentFilter;
}
// For rewriting
IncludeNestedDocsQuery(Query rewrite, Query originalQuery, IncludeNestedDocsQuery previousInstance) {
this.origParentQuery = originalQuery;
this.parentQuery = rewrite;
this.parentFilter = previousInstance.parentFilter;
setBoost(previousInstance.getBoost());
}
// For cloning
IncludeNestedDocsQuery(Query originalQuery, IncludeNestedDocsQuery previousInstance) {
this.origParentQuery = originalQuery;
this.parentQuery = originalQuery;
this.parentFilter = previousInstance.parentFilter;
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new IncludeNestedDocsWeight(this, parentQuery, parentQuery.createWeight(searcher, needsScores), parentFilter);
}
static class IncludeNestedDocsWeight extends Weight {
private final Query parentQuery;
private final Weight parentWeight;
private final BitSetProducer parentsFilter;
IncludeNestedDocsWeight(Query query, Query parentQuery, Weight parentWeight, BitSetProducer parentsFilter) {
super(query);
this.parentQuery = parentQuery;
this.parentWeight = parentWeight;
this.parentsFilter = parentsFilter;
}
@Override
public void extractTerms(Set<Term> terms) {
parentWeight.extractTerms(terms);
}
@Override
public void normalize(float norm, float topLevelBoost) {
parentWeight.normalize(norm, topLevelBoost);
}
@Override
public float getValueForNormalization() throws IOException {
return parentWeight.getValueForNormalization(); // this query is never boosted so just delegate...
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
final Scorer parentScorer = parentWeight.scorer(context);
// no matches
if (parentScorer == null) {
return null;
}
BitSet parents = parentsFilter.getBitSet(context);
if (parents == null) {
// No matches
return null;
}
int firstParentDoc = parentScorer.nextDoc();
if (firstParentDoc == DocIdSetIterator.NO_MORE_DOCS) {
// No matches
return null;
}
return new IncludeNestedDocsScorer(this, parentScorer, parents, firstParentDoc);
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
return null; //Query is used internally and not by users, so explain can be empty
}
}
static class IncludeNestedDocsScorer extends Scorer {
final Scorer parentScorer;
final BitSet parentBits;
int currentChildPointer = -1;
int currentParentPointer = -1;
int currentDoc = -1;
IncludeNestedDocsScorer(Weight weight, Scorer parentScorer, BitSet parentBits, int currentParentPointer) {
super(weight);
this.parentScorer = parentScorer;
this.parentBits = parentBits;
this.currentParentPointer = currentParentPointer;
if (currentParentPointer == 0) {
currentChildPointer = 0;
} else {
this.currentChildPointer = this.parentBits.prevSetBit(currentParentPointer - 1);
if (currentChildPointer == -1) {
// no previous set parent, we delete from doc 0
currentChildPointer = 0;
} else {
currentChildPointer++; // we only care about children
}
}
currentDoc = currentChildPointer;
}
@Override
public Collection<ChildScorer> getChildren() {
return parentScorer.getChildren();
}
@Override
public int nextDoc() throws IOException {
if (currentParentPointer == NO_MORE_DOCS) {
return (currentDoc = NO_MORE_DOCS);
}
if (currentChildPointer == currentParentPointer) {
// we need to return the current parent as well, but prepare to return
// the next set of children
currentDoc = currentParentPointer;
currentParentPointer = parentScorer.nextDoc();
if (currentParentPointer != NO_MORE_DOCS) {
currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
if (currentChildPointer == -1) {
// no previous set parent, just set the child to the current parent
currentChildPointer = currentParentPointer;
} else {
currentChildPointer++; // we only care about children
}
}
} else {
currentDoc = currentChildPointer++;
}
assert currentDoc != -1;
return currentDoc;
}
@Override
public int advance(int target) throws IOException {
if (target == NO_MORE_DOCS) {
return (currentDoc = NO_MORE_DOCS);
}
if (target == 0) {
return nextDoc();
}
if (target < currentParentPointer) {
currentDoc = currentParentPointer = parentScorer.advance(target);
if (currentParentPointer == NO_MORE_DOCS) {
return (currentDoc = NO_MORE_DOCS);
}
if (currentParentPointer == 0) {
currentChildPointer = 0;
} else {
currentChildPointer = parentBits.prevSetBit(currentParentPointer - 1);
if (currentChildPointer == -1) {
// no previous set parent, just set the child to 0 to delete all up to the parent
currentChildPointer = 0;
} else {
currentChildPointer++; // we only care about children
}
}
} else {
currentDoc = currentChildPointer++;
}
return currentDoc;
}
@Override
public float score() throws IOException {
return parentScorer.score();
}
@Override
public int freq() throws IOException {
return parentScorer.freq();
}
@Override
public int docID() {
return currentDoc;
}
@Override
public long cost() {
return parentScorer.cost();
}
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
final Query parentRewrite = parentQuery.rewrite(reader);
if (parentRewrite != parentQuery) {
return new IncludeNestedDocsQuery(parentRewrite, parentQuery, this);
} else {
return this;
}
}
@Override
public String toString(String field) {
return "IncludeNestedDocsQuery (" + parentQuery.toString() + ")";
}
@Override
public boolean equals(Object _other) {
if (_other instanceof IncludeNestedDocsQuery) {
final IncludeNestedDocsQuery other = (IncludeNestedDocsQuery) _other;
return origParentQuery.equals(other.origParentQuery) && parentFilter.equals(other.parentFilter);
} else {
return false;
}
}
@Override
public int hashCode() {
final int prime = 31;
int hash = 1;
hash = prime * hash + origParentQuery.hashCode();
hash = prime * hash + parentFilter.hashCode();
return hash;
}
@Override
public Query clone() {
Query clonedQuery = origParentQuery.clone();
return new IncludeNestedDocsQuery(clonedQuery, this);
}
}


@ -27,7 +27,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineConfig;
import java.io.IOException;
@ -54,13 +53,11 @@ public class IndexSearcherWrapper {
}
/**
* @param engineConfig The engine config which can be used to get the query cache and query cache policy from
* when creating a new index searcher
* @param searcher The provided index searcher to be wrapped to add custom functionality
* @return a new index searcher wrapping the provided index searcher or if no wrapping was performed
* the provided index searcher
*/
protected IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws IOException {
protected IndexSearcher wrap(IndexSearcher searcher) throws IOException {
return searcher;
}
/**
@ -69,7 +66,7 @@ public class IndexSearcherWrapper {
*
* This is invoked each time a {@link Engine.Searcher} is requested to do an operation. (for example search)
*/
public final Engine.Searcher wrap(EngineConfig engineConfig, Engine.Searcher engineSearcher) throws IOException {
public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOException {
final ElasticsearchDirectoryReader elasticsearchDirectoryReader = ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
if (elasticsearchDirectoryReader == null) {
throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
@ -87,14 +84,15 @@ public class IndexSearcherWrapper {
}
}
final IndexSearcher origIndexSearcher = engineSearcher.searcher();
final IndexSearcher innerIndexSearcher = new IndexSearcher(reader);
innerIndexSearcher.setQueryCache(engineConfig.getQueryCache());
innerIndexSearcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
innerIndexSearcher.setSimilarity(engineConfig.getSimilarity());
innerIndexSearcher.setQueryCache(origIndexSearcher.getQueryCache());
innerIndexSearcher.setQueryCachingPolicy(origIndexSearcher.getQueryCachingPolicy());
innerIndexSearcher.setSimilarity(origIndexSearcher.getSimilarity(true));
// TODO: Right now IndexSearcher isn't wrapper friendly, when it becomes wrapper friendly we should revise this extension point
// For example if IndexSearcher#rewrite() is overridden, then IndexSearcher#createNormalizedWeight also needs to be overridden
// This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times
final IndexSearcher indexSearcher = wrap(engineConfig, innerIndexSearcher);
final IndexSearcher indexSearcher = wrap(innerIndexSearcher);
if (reader == nonClosingReaderWrapper && indexSearcher == innerIndexSearcher) {
return engineSearcher;
} else {

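For code extending the wrapper, the signature change above means wrap(IndexSearcher) no longer receives the EngineConfig; the query cache, caching policy, and similarity are instead inherited from the searcher being wrapped. A hypothetical subclass under the new extension point (AuditingSearcherWrapper is an illustrative name, and the import location is assumed rather than taken from this commit):

import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.index.engine.IndexSearcherWrapper; // package assumed; adjust to where the class lives in this tree

class AuditingSearcherWrapper extends IndexSearcherWrapper {
    @Override
    protected IndexSearcher wrap(IndexSearcher searcher) throws IOException {
        // Cache, caching policy and similarity were already copied from the
        // original searcher by the final wrap(Engine.Searcher) entry point.
        System.out.println("wrapping a searcher over " + searcher.getIndexReader());
        return searcher; // returning the argument means "no wrapping performed"
    }
}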

@ -727,7 +727,7 @@ public class IndexShard extends AbstractIndexShardComponent {
final Engine.Searcher searcher = engine.acquireSearcher(source);
boolean success = false;
try {
final Engine.Searcher wrappedSearcher = searcherWrapper == null ? searcher : searcherWrapper.wrap(engineConfig, searcher);
final Engine.Searcher wrappedSearcher = searcherWrapper == null ? searcher : searcherWrapper.wrap(searcher);
assert wrappedSearcher != null;
success = true;
return wrappedSearcher;


@ -24,6 +24,7 @@ import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.common.breaker.NoopCircuitBreaker;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.node.settings.NodeSettingsService;
@ -40,6 +41,8 @@ import java.util.concurrent.atomic.AtomicLong;
*/
public class HierarchyCircuitBreakerService extends CircuitBreakerService {
private static final String CHILD_LOGGER_PREFIX = "org.elasticsearch.indices.breaker.";
private final ConcurrentMap<String, CircuitBreaker> breakers = new ConcurrentHashMap<>();
// Old pre-1.4.0 backwards compatible settings
@ -237,7 +240,8 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
} else {
CircuitBreaker oldBreaker;
CircuitBreaker breaker = new ChildMemoryCircuitBreaker(breakerSettings,
logger, this, breakerSettings.getName());
Loggers.getLogger(CHILD_LOGGER_PREFIX + breakerSettings.getName()),
this, breakerSettings.getName());
for (;;) {
oldBreaker = breakers.putIfAbsent(breakerSettings.getName(), breaker);
@ -245,7 +249,9 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
return;
}
breaker = new ChildMemoryCircuitBreaker(breakerSettings,
(ChildMemoryCircuitBreaker)oldBreaker, logger, this, breakerSettings.getName());
(ChildMemoryCircuitBreaker)oldBreaker,
Loggers.getLogger(CHILD_LOGGER_PREFIX + breakerSettings.getName()),
this, breakerSettings.getName());
if (breakers.replace(breakerSettings.getName(), oldBreaker, breaker)) {
return;

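The breaker change above stops passing one shared logger to every child breaker and instead derives a per-breaker logger name, so logging can be tuned for a single breaker such as org.elasticsearch.indices.breaker.fielddata. A minimal sketch of the naming scheme, assuming the Loggers helper used elsewhere in this codebase:

import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

class BreakerLoggerSketch {
    private static final String CHILD_LOGGER_PREFIX = "org.elasticsearch.indices.breaker.";

    // One logger per breaker, e.g. "org.elasticsearch.indices.breaker.request",
    // so a single breaker's log level can be raised without touching the rest.
    static ESLogger breakerLogger(String breakerName) {
        return Loggers.getLogger(CHILD_LOGGER_PREFIX + breakerName);
    }
}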

@ -82,6 +82,7 @@ public class PluginManager {
"lang-groovy",
"lang-javascript",
"lang-python",
"mapper-attachments",
"mapper-murmur3",
"mapper-size",
"repository-azure",


@ -800,7 +800,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
currentFieldName = parser.currentName();
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType());
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder();
xContentBuilder.startObject();
xContentBuilder.field(currentFieldName);
xContentBuilder.copyCurrentStructure(parser);
@ -813,22 +813,21 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
builder.aggregations = aggregations;
} else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
builder.highlightBuilder = xContentBuilder.bytes();
} else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
builder.innerHitsBuilder = xContentBuilder.bytes();
} else if (context.parseFieldMatcher().match(currentFieldName, SUGGEST_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType());
xContentBuilder.copyCurrentStructure(parser);
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
builder.suggestBuilder = xContentBuilder.bytes();
} else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
List<BytesReference> sorts = new ArrayList<>();
XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
sorts.add(xContentBuilder.bytes());
builder.sorts = sorts;
} else if (context.parseFieldMatcher().match(currentFieldName, EXT_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
builder.ext = xContentBuilder.bytes();
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
@ -861,14 +860,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
} else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
List<BytesReference> sorts = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
sorts.add(xContentBuilder.bytes());
}
builder.sorts = sorts;
} else if (context.parseFieldMatcher().match(currentFieldName, RESCORE_FIELD)) {
List<BytesReference> rescoreBuilders = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
XContentBuilder xContentBuilder = XContentFactory.contentBuilder(parser.contentType()).copyCurrentStructure(parser);
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
rescoreBuilders.add(xContentBuilder.bytes());
}
builder.rescoreBuilders = rescoreBuilders;

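Every branch above now re-encodes the captured sub-section with XContentFactory.jsonBuilder() rather than mirroring the parser's own content type, so the stored bytes always come out as JSON whether the request arrived as JSON, YAML, or CBOR. The shared shape of that capture, as a hedged sketch:

import java.io.IOException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;

class SectionCaptureSketch {
    // Copies the structure currently under the parser into a JSON-encoded
    // BytesReference, regardless of the content type of the surrounding request.
    static BytesReference captureAsJson(XContentParser parser) throws IOException {
        return XContentFactory.jsonBuilder().copyCurrentStructure(parser).bytes();
    }
}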

@ -41,6 +41,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
@ -196,14 +197,16 @@ public class DefaultSearchContext extends SearchContext {
if (query() == null) {
parsedQuery(ParsedQuery.parsedMatchAllQuery());
}
if (queryBoost() != 1.0f) {
if (queryBoost() != AbstractQueryBuilder.DEFAULT_BOOST) {
parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), new WeightFactorFunction(queryBoost)), parsedQuery()));
}
Query searchFilter = searchFilter(types());
if (searchFilter != null) {
if (Queries.isConstantMatchAllQuery(query())) {
Query q = new ConstantScoreQuery(searchFilter);
q.setBoost(query().getBoost());
if (query().getBoost() != AbstractQueryBuilder.DEFAULT_BOOST) {
q = new BoostQuery(q, query().getBoost());
}
parsedQuery(new ParsedQuery(q, parsedQuery()));
} else {
BooleanQuery filtered = new BooleanQuery.Builder()


@ -38,7 +38,7 @@ grant codeBase "${codebase.securesm-1.0.jar}" {
//// Very special jar permissions:
//// These are dangerous permissions that we don't want to grant to everything.
grant codeBase "${codebase.lucene-core-5.4.0-snapshot-1711508.jar}" {
grant codeBase "${codebase.lucene-core-5.4.0-snapshot-1712973.jar}" {
// needed to allow MMapDirectory's "unmap hack"
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";


@ -30,7 +30,7 @@ grant codeBase "${codebase.securemock-1.1.jar}" {
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
};
grant codeBase "${codebase.lucene-test-framework-5.4.0-snapshot-1711508.jar}" {
grant codeBase "${codebase.lucene-test-framework-5.4.0-snapshot-1712973.jar}" {
// needed by RamUsageTester
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
};


@ -47,6 +47,7 @@ OFFICIAL PLUGINS
- lang-groovy
- lang-javascript
- lang-python
- mapper-attachments
- mapper-murmur3
- mapper-size
- repository-azure


@ -0,0 +1,116 @@
package org.elasticsearch.action.support.master;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.disruption.NetworkDisconnectPartition;
import org.elasticsearch.test.disruption.NetworkPartition;
import org.elasticsearch.test.transport.MockTransportService;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import static org.hamcrest.Matchers.equalTo;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
@ESIntegTestCase.SuppressLocalMode
public class IndexingMasterFailoverIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
final HashSet<Class<? extends Plugin>> classes = new HashSet<>(super.nodePlugins());
classes.add(MockTransportService.TestPlugin.class);
return classes;
}
/**
* Indexing operations which entail mapping changes require a blocking request to the master node to update the mapping.
* If the master node is being disrupted or if it cannot commit cluster state changes, it needs to retry within timeout limits.
* This retry logic is implemented in TransportMasterNodeAction and tested by the following master failover scenario.
*/
public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable {
logger.info("--> start 4 nodes, 3 master, 1 data");
final Settings sharedSettings = Settings.builder()
.put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly
.put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly
.put("discovery.zen.join_timeout", "10s") // long enough to induce failures but short enough that the test won't time out
.put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2)
.put("transport.host", "127.0.0.1") // only bind on one interface; we use IPv4 here by default
.build();
internalCluster().startMasterOnlyNodesAsync(3, sharedSettings).get();
String dataNode = internalCluster().startDataOnlyNode(sharedSettings);
logger.info("--> wait for all nodes to join the cluster");
ensureStableCluster(4);
// We index data with mapping changes into the cluster while a master failover happens at the same time
client().admin().indices().prepareCreate("myindex")
.setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
.get();
ensureGreen("myindex");
final CyclicBarrier barrier = new CyclicBarrier(2);
Thread indexingThread = new Thread(new Runnable() {
@Override
public void run() {
try {
barrier.await();
} catch (InterruptedException e) {
logger.warn("Barrier interrupted", e);
return;
} catch (BrokenBarrierException e) {
logger.warn("Broken barrier", e);
return;
}
for (int i = 0; i < 10; i++) {
// index data with mapping changes
IndexResponse response = client(dataNode).prepareIndex("myindex", "mytype").setSource("field_" + i, "val").get();
assertThat(response.isCreated(), equalTo(true));
}
}
});
indexingThread.setName("indexingThread");
indexingThread.start();
barrier.await();
// interrupt communication between master and other nodes in cluster
String master = internalCluster().getMasterName();
Set<String> otherNodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames()));
otherNodes.remove(master);
NetworkPartition partition = new NetworkDisconnectPartition(Collections.singleton(master), otherNodes, random());
internalCluster().setDisruptionScheme(partition);
logger.info("--> disrupting network");
partition.startDisrupting();
logger.info("--> waiting for new master to be elected");
ensureStableCluster(3, dataNode);
partition.stopDisrupting();
logger.info("--> waiting to heal");
ensureStableCluster(4);
indexingThread.join();
ensureGreen("myindex");
refresh();
assertThat(client().prepareSearch("myindex").get().getHits().getTotalHits(), equalTo(10L));
}
}

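The test above uses a two-party CyclicBarrier so the indexing thread and the main thread begin at the same instant: each blocks in await() until the other arrives. Stripped of the Elasticsearch machinery, the handshake looks like this:

import java.util.concurrent.CyclicBarrier;

class BarrierHandshakeSketch {
    public static void main(String[] args) throws Exception {
        CyclicBarrier barrier = new CyclicBarrier(2); // two parties: worker + main
        Thread worker = new Thread(() -> {
            try {
                barrier.await();                      // blocks until main arrives too
                System.out.println("worker: start indexing");
            } catch (Exception e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();
        barrier.await();                              // releases both threads together
        System.out.println("main: start disruption");
        worker.join();
    }
}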

@ -29,6 +29,7 @@ import org.elasticsearch.action.support.ThreadedActionListener;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
@ -38,6 +39,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESTestCase;
@ -256,17 +259,8 @@ public class TransportMasterNodeActionTests extends ESTestCase {
clusterService.setState(ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
final AtomicBoolean delegationToMaster = new AtomicBoolean();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected void processBeforeDelegationToMaster(Request request, ClusterState state) {
logger.debug("Delegation to master called");
delegationToMaster.set(true);
}
}.execute(request, listener);
assertTrue("processBeforeDelegationToMaster not called", delegationToMaster.get());
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
@ -285,17 +279,8 @@ public class TransportMasterNodeActionTests extends ESTestCase {
clusterService.setState(ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
final AtomicBoolean delegationToMaster = new AtomicBoolean();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected void processBeforeDelegationToMaster(Request request, ClusterState state) {
logger.debug("Delegation to master called");
delegationToMaster.set(true);
}
}.execute(request, listener);
assertTrue("processBeforeDelegationToMaster not called", delegationToMaster.get());
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
@ -320,4 +305,35 @@ public class TransportMasterNodeActionTests extends ESTestCase {
}
}
}
public void testMasterFailoverAfterStepDown() throws ExecutionException, InterruptedException {
Request request = new Request().masterNodeTimeout(TimeValue.timeValueHours(1));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
final Response response = new Response();
clusterService.setState(ClusterStateCreationUtils.state(localNode, localNode, allNodes));
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
// The other node has become master; simulate failures of this node while publishing cluster state through ZenDiscovery
TransportMasterNodeActionTests.this.clusterService.setState(ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
Throwable failure = randomBoolean()
? new Discovery.FailedToCommitClusterStateException("Fake error")
: new NotMasterException("Fake error");
listener.onFailure(failure);
}
}.execute(request, listener);
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
assertThat(capturedRequest.request, equalTo(request));
assertThat(capturedRequest.action, equalTo("testAction"));
transport.handleResponse(capturedRequest.requestId, response);
assertTrue(listener.isDone());
assertThat(listener.get(), equalTo(response));
}
}


@ -27,13 +27,16 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.test.ESAllocationTestCase;
import org.elasticsearch.test.gateway.NoopGatewayAllocator;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.equalTo;
@ -624,4 +627,93 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
assertThat(routingNodes.node("node3").isEmpty(), equalTo(true));
}
public void testRebalanceWhileShardFetching() {
final AtomicBoolean hasFetches = new AtomicBoolean(true);
AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build(), new NoopGatewayAllocator() {
@Override
public boolean allocateUnassigned(RoutingAllocation allocation) {
if (hasFetches.get()) {
allocation.setHasPendingAsyncFetch();
}
return super.allocateUnassigned(allocation);
}
});
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
.put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "_id", "node1,node2")).numberOfShards(2).numberOfReplicas(0))
.build();
// we use a second index here (test1) that never gets assigned; otherwise allocateUnassigned is never called if we don't have unassigned shards.
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.addAsNew(metaData.index("test1"))
.build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(INITIALIZING));
}
logger.debug("start all the primary shards for test");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState("test", INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
}
logger.debug("now, start 1 more node, check that rebalancing will not happen since we have shard sync going on");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node2")))
.build();
logger.debug("reroute and check that nothing has changed");
RoutingAllocation.Result reroute = strategy.reroute(clusterState);
assertFalse(reroute.changed());
routingTable = reroute.routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
assertThat(routingTable.index("test").shard(i).primaryShard().state(), equalTo(STARTED));
}
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(1));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
}
logger.debug("now set hasFetches to false and reroute; we should now see exactly one relocating shard");
hasFetches.set(false);
reroute = strategy.reroute(clusterState);
assertTrue(reroute.changed());
routingTable = reroute.routingTable();
int numStarted = 0;
int numRelocating = 0;
for (int i = 0; i < routingTable.index("test").shards().size(); i++) {
assertThat(routingTable.index("test").shard(i).shards().size(), equalTo(1));
if (routingTable.index("test").shard(i).primaryShard().state() == STARTED) {
numStarted++;
} else if (routingTable.index("test").shard(i).primaryShard().state() == RELOCATING) {
numRelocating++;
}
}
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(1));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(UNASSIGNED));
}
assertEquals(numStarted, 1);
assertEquals(numRelocating, 1);
}
}


@ -31,6 +31,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESAllocationTestCase;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
@ -87,6 +88,64 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
}
public void testClusterLevelShardsLimitAllocate() {
AllocationService strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, 1)
.build());
logger.info("Building initial routing table");
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 4)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1));
logger.info("Start the primary shards");
RoutingNodes routingNodes = clusterState.getRoutingNodes();
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(1));
assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(2));
// Bump the cluster total shards to 2
strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, 2)
.build());
logger.info("Do another reroute, make sure shards are now allocated");
routingTable = strategy.reroute(clusterState).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.INITIALIZING), equalTo(1));
routingNodes = clusterState.getRoutingNodes();
routingTable = strategy.applyStartedShards(clusterState, routingNodes.shardsWithState(INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(clusterState.getRoutingNodes().node("node1").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
assertThat(clusterState.getRoutingNodes().node("node2").numberOfShardsWithState(ShardRoutingState.STARTED), equalTo(2));
assertThat(clusterState.getRoutingNodes().unassigned().size(), equalTo(0));
}
public void testIndexLevelShardsLimitRemain() {
AllocationService strategy = createAllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)


@ -27,6 +27,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.RAMDirectory;
@ -34,6 +35,7 @@ import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
public class MultiPhrasePrefixQueryTests extends ESTestCase {
public void testSimple() throws Exception {
@ -78,6 +80,8 @@ public class MultiPhrasePrefixQueryTests extends ESTestCase {
multiPhrasePrefixQuery.add(new Term[]{new Term("field", "aaa"), new Term("field", "bb")});
multiPhrasePrefixQuery.setBoost(randomFloat());
Query query = multiPhrasePrefixQuery.rewrite(reader);
assertThat(query.getBoost(), equalTo(multiPhrasePrefixQuery.getBoost()));
assertThat(query, instanceOf(BoostQuery.class));
BoostQuery boostQuery = (BoostQuery) query;
assertThat(boostQuery.getBoost(), equalTo(multiPhrasePrefixQuery.getBoost()));
}
}


@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingService;


@ -385,7 +385,7 @@ public class IndexModuleTests extends ESTestCase {
}
@Override
public IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException {
public IndexSearcher wrap(IndexSearcher searcher) throws EngineException {
return null;
}
}


@ -519,7 +519,7 @@ public class InternalEngineTests extends ESTestCase {
}
@Override
public IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException {
public IndexSearcher wrap(IndexSearcher searcher) throws EngineException {
counter.incrementAndGet();
return searcher;
}
@ -530,7 +530,7 @@ public class InternalEngineTests extends ESTestCase {
engine.close();
engine = new InternalEngine(engine.config(), false);
Engine.Searcher searcher = wrapper.wrap(engine.config(), engine.acquireSearcher("test"));
Engine.Searcher searcher = wrapper.wrap(engine.acquireSearcher("test"));
assertThat(counter.get(), equalTo(2));
searcher.close();
IOUtils.close(store, engine);


@ -37,7 +37,6 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.mapper.MapperBuilders;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.geo.BaseGeoPointFieldMapper;
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapperLegacy;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
@ -95,7 +94,6 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase {
} else if (type.getType().equals("byte")) {
fieldType = MapperBuilders.byteField(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();
} else if (type.getType().equals("geo_point")) {
BaseGeoPointFieldMapper.Builder builder;
// norelease update to .before(Version.V_2_2_0) once GeoPointFieldV2 is fully merged
if (indexService.getIndexSettings().getIndexVersionCreated().onOrBefore(Version.CURRENT)) {
fieldType = new GeoPointFieldMapperLegacy.Builder(fieldName).docValues(docValues).fieldDataSettings(type.getSettings()).build(context).fieldType();


@ -0,0 +1,98 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.GeoPointField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.util.GeoUtils;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.VersionUtils;
import static org.elasticsearch.test.geo.RandomShapeGenerator.randomPoint;
import static org.hamcrest.Matchers.*;
/**
*
*/
public abstract class AbstractGeoFieldDataTestCase extends AbstractFieldDataImplTestCase {
protected Version version = VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.CURRENT);
@Override
protected abstract FieldDataType getFieldDataType();
protected Settings.Builder getFieldDataSettings() {
return Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version);
}
protected Field randomGeoPointField(String fieldName, Field.Store store) {
GeoPoint point = randomPoint(random());
// norelease move to .before(Version.V_2_2_0) once GeoPointV2 is fully merged
if (version.onOrBefore(Version.CURRENT)) {
return new StringField(fieldName, point.lat()+","+point.lon(), store);
}
return new GeoPointField(fieldName, point.lon(), point.lat(), store);
}
@Override
protected void fillAllMissing() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
writer.addDocument(d);
}
@Override
public void testSortMultiValuesFields() {
assumeFalse("Only test on non geo_point fields", getFieldDataType().getType().equals("geo_point"));
}
protected void assertValues(MultiGeoPointValues values, int docId) {
assertValues(values, docId, false);
}
protected void assertMissing(MultiGeoPointValues values, int docId) {
assertValues(values, docId, true);
}
private void assertValues(MultiGeoPointValues values, int docId, boolean missing) {
values.setDocument(docId);
int docCount = values.count();
if (missing) {
assertThat(docCount, equalTo(0));
} else {
assertThat(docCount, greaterThan(0));
for (int i = 0; i < docCount; ++i) {
assertThat(values.valueAt(i).lat(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LAT_INCL), lessThanOrEqualTo(GeoUtils.MAX_LAT_INCL)));
assertThat(values.valueAt(i).lon(), allOf(greaterThanOrEqualTo(GeoUtils.MIN_LON_INCL), lessThanOrEqualTo(GeoUtils.MAX_LON_INCL)));
}
}
}
}


@ -0,0 +1,205 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata;
import org.apache.lucene.document.*;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.elasticsearch.index.fielddata.plain.AbstractAtomicGeoPointFieldData;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
/**
* Basic Unit Test for GeoPointField data
* todo include backcompat testing - see ISSUE #14562
*/
public class GeoFieldDataTests extends AbstractGeoFieldDataTestCase {
private static final String FIELD_NAME = "value";
@Override
protected FieldDataType getFieldDataType() {
return new FieldDataType("geo_point", getFieldDataSettings());
}
@Override
protected void add2SingleValuedDocumentsAndDeleteOneOfThem() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.YES));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
writer.commit();
writer.deleteDocuments(new Term("_id", "1"));
}
@Override
protected void fillMultiValueWithMissing() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
// missing
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
}
@Override
protected void fillSingleValueAllSet() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
}
@Override
protected void fillSingleValueWithMissing() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
//d.add(new StringField("value", one(), Field.Store.NO)); // MISSING....
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
}
@Override
protected void fillMultiValueAllSet() throws Exception {
Document d = new Document();
d.add(new StringField("_id", "1", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "2", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
d = new Document();
d.add(new StringField("_id", "3", Field.Store.NO));
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
writer.addDocument(d);
}
@Override
protected void fillExtendedMvSet() throws Exception {
Document d;
final int maxDocs = randomInt(10);
for (int i=0; i<maxDocs; ++i) {
d = new Document();
d.add(new StringField("_id", i+"", Field.Store.NO));
int maxVals = randomInt(5);
for (int v=0; v<maxVals; ++v) {
d.add(randomGeoPointField(FIELD_NAME, Field.Store.NO));
}
writer.addDocument(d);
if (randomBoolean()) {
writer.commit();
}
}
}
@Override
public void testSingleValueAllSet() throws Exception {
fillSingleValueAllSet();
IndexFieldData indexFieldData = getForField("value");
LeafReaderContext readerContext = refreshReader();
AtomicFieldData fieldData = indexFieldData.load(readerContext);
assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
MultiGeoPointValues fieldValues = ((AbstractAtomicGeoPointFieldData)fieldData).getGeoPointValues();
assertValues(fieldValues, 0);
assertValues(fieldValues, 1);
assertValues(fieldValues, 2);
}
@Override
public void testSingleValueWithMissing() throws Exception {
fillSingleValueWithMissing();
IndexFieldData indexFieldData = getForField("value");
LeafReaderContext readerContext = refreshReader();
AtomicFieldData fieldData = indexFieldData.load(readerContext);
assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
MultiGeoPointValues fieldValues = ((AbstractAtomicGeoPointFieldData)fieldData).getGeoPointValues();
assertValues(fieldValues, 0);
assertMissing(fieldValues, 1);
assertValues(fieldValues, 2);
}
@Override
public void testMultiValueAllSet() throws Exception {
fillMultiValueAllSet();
IndexFieldData indexFieldData = getForField("value");
LeafReaderContext readerContext = refreshReader();
AtomicFieldData fieldData = indexFieldData.load(readerContext);
assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
MultiGeoPointValues fieldValues = ((AbstractAtomicGeoPointFieldData)fieldData).getGeoPointValues();
assertValues(fieldValues, 0);
assertValues(fieldValues, 1);
assertValues(fieldValues, 2);
}
@Override
public void testMultiValueWithMissing() throws Exception {
fillMultiValueWithMissing();
IndexFieldData indexFieldData = getForField("value");
AtomicFieldData fieldData = indexFieldData.load(refreshReader());
assertThat(fieldData.ramBytesUsed(), greaterThanOrEqualTo(minRamBytesUsed()));
MultiGeoPointValues fieldValues = ((AbstractAtomicGeoPointFieldData)fieldData).getGeoPointValues();
assertValues(fieldValues, 0);
assertMissing(fieldValues, 1);
assertValues(fieldValues, 2);
}
}


@ -23,8 +23,12 @@ import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.io.JsonStringEncoder;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanBoostQuery;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
@ -40,6 +44,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent;
@ -55,10 +60,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.index.Index;
@@ -104,9 +106,7 @@ import java.lang.reflect.Proxy;
import java.util.*;
import java.util.concurrent.ExecutionException;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.*;
public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> extends ESTestCase {
@@ -336,13 +336,24 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
public void testFromXContent() throws IOException {
for (int runs = 0; runs < NUMBER_OF_TESTQUERIES; runs++) {
QB testQuery = createTestQueryBuilder();
assertParsedQuery(testQuery.toString(), testQuery);
XContentBuilder builder = toXContent(testQuery, randomFrom(XContentType.values()));
assertParsedQuery(builder.bytes(), testQuery);
for (Map.Entry<String, QB> alternateVersion : getAlternateVersions().entrySet()) {
assertParsedQuery(alternateVersion.getKey(), alternateVersion.getValue());
String queryAsString = alternateVersion.getKey();
assertParsedQuery(new BytesArray(queryAsString), alternateVersion.getValue());
}
}
}
protected static XContentBuilder toXContent(QueryBuilder<?> query, XContentType contentType) throws IOException {
XContentBuilder builder = XContentFactory.contentBuilder(contentType);
if (randomBoolean()) {
builder.prettyPrint();
}
query.toXContent(builder, ToXContent.EMPTY_PARAMS);
return builder;
}
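
The randomized content type above is why the parsing helpers below take a BytesReference rather than a String: SMILE and CBOR are binary encodings whose output cannot safely round-trip through Java text. A minimal sketch using only types already imported in this test (the field name is made up):

XContentBuilder smile = XContentFactory.contentBuilder(XContentType.SMILE);
smile.startObject().field("some_field", "value").endObject();
BytesReference bytes = smile.bytes();  // raw binary SMILE, not valid UTF-8 text
// the xContent factory detects the content type from the leading bytes
XContentParser parser = XContentFactory.xContent(bytes).createParser(bytes);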
/**
* Test that an unknown field triggers a ParsingException.
* To find the right position in the root query, we add a marker as `queryName` which
@@ -411,6 +422,20 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
assertEquals(expectedQuery.hashCode(), newQuery.hashCode());
}
/**
* Parses the query provided as a bytes argument and compares it with the expected result provided as a {@link QueryBuilder}.
*/
protected final void assertParsedQuery(BytesReference queryAsBytes, QueryBuilder<?> expectedQuery) throws IOException {
assertParsedQuery(queryAsBytes, expectedQuery, ParseFieldMatcher.STRICT);
}
protected final void assertParsedQuery(BytesReference queryAsBytes, QueryBuilder<?> expectedQuery, ParseFieldMatcher matcher) throws IOException {
QueryBuilder<?> newQuery = parseQuery(queryAsBytes, matcher);
assertNotSame(newQuery, expectedQuery);
assertEquals(expectedQuery, newQuery);
assertEquals(expectedQuery.hashCode(), newQuery.hashCode());
}
protected final QueryBuilder<?> parseQuery(String queryAsString) throws IOException {
return parseQuery(queryAsString, ParseFieldMatcher.STRICT);
}
@@ -420,12 +445,16 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
return parseQuery(parser, matcher);
}
protected final QueryBuilder<?> parseQuery(BytesReference query) throws IOException {
XContentParser parser = XContentFactory.xContent(query).createParser(query);
return parseQuery(parser, ParseFieldMatcher.STRICT);
protected final QueryBuilder<?> parseQuery(BytesReference queryAsBytes) throws IOException {
return parseQuery(queryAsBytes, ParseFieldMatcher.STRICT);
}
protected final QueryBuilder<?> parseQuery(XContentParser parser, ParseFieldMatcher matcher) throws IOException {
protected final QueryBuilder<?> parseQuery(BytesReference queryAsBytes, ParseFieldMatcher matcher) throws IOException {
XContentParser parser = XContentFactory.xContent(queryAsBytes).createParser(queryAsBytes);
return parseQuery(parser, matcher);
}
private QueryBuilder<?> parseQuery(XContentParser parser, ParseFieldMatcher matcher) throws IOException {
QueryParseContext context = createParseContext();
context.reset(parser);
context.parseFieldMatcher(matcher);
@@ -501,26 +530,45 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
assertThat(namedQuery, equalTo(query));
}
if (query != null) {
assertBoost(queryBuilder, query);
if (queryBuilder.boost() != AbstractQueryBuilder.DEFAULT_BOOST) {
assertThat(query, either(instanceOf(BoostQuery.class)).or(instanceOf(SpanBoostQuery.class)));
if (query instanceof SpanBoostQuery) {
SpanBoostQuery spanBoostQuery = (SpanBoostQuery) query;
assertThat(spanBoostQuery.getBoost(), equalTo(queryBuilder.boost()));
query = spanBoostQuery.getQuery();
} else {
BoostQuery boostQuery = (BoostQuery) query;
assertThat(boostQuery.getBoost(), equalTo(queryBuilder.boost()));
query = boostQuery.getQuery();
}
}
}
doAssertLuceneQuery(queryBuilder, query, context);
}
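
The inline checks above track Lucene 5.x, where Query.setBoost/getBoost were deprecated in favor of immutable wrappers: BoostQuery for plain queries and SpanBoostQuery for span queries. A minimal sketch of the unwrap pattern, assuming the Lucene 5.3+ API (the helper name is illustrative):

import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;

// Illustrative only: a non-default boost surfaces as a wrapper, never via query.getBoost().
static Query unwrapBoost(Query query, float expectedBoost) {
    if (query instanceof BoostQuery) {
        BoostQuery boostQuery = (BoostQuery) query;
        assert boostQuery.getBoost() == expectedBoost;  // the boost lives in the wrapper
        return boostQuery.getQuery();                   // the wrapped query, e.g. a TermQuery
    }
    return query;  // DEFAULT_BOOST: no wrapper is added at all
}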
/**
* Allows overriding boost assertions for queries that don't have the default behaviour.
*/
protected void assertBoost(QB queryBuilder, Query query) throws IOException {
// workaround https://bugs.openjdk.java.net/browse/JDK-8056984
float boost = queryBuilder.boost();
assertThat(query.getBoost(), equalTo(boost));
}
/**
* Checks the result of {@link QueryBuilder#toQuery(QueryShardContext)} given the original {@link QueryBuilder} and {@link QueryShardContext}.
* Contains the query specific checks to be implemented by subclasses.
*/
protected abstract void doAssertLuceneQuery(QB queryBuilder, Query query, QueryShardContext context) throws IOException;
protected static void assertTermOrBoostQuery(Query query, String field, String value, float fieldBoost) {
if (fieldBoost != AbstractQueryBuilder.DEFAULT_BOOST) {
assertThat(query, instanceOf(BoostQuery.class));
BoostQuery boostQuery = (BoostQuery) query;
assertThat(boostQuery.getBoost(), equalTo(fieldBoost));
query = boostQuery.getQuery();
}
assertTermQuery(query, field, value);
}
protected static void assertTermQuery(Query query, String field, String value) {
assertThat(query, instanceOf(TermQuery.class));
TermQuery termQuery = (TermQuery) query;
assertThat(termQuery.getTerm().field(), equalTo(field));
assertThat(termQuery.getTerm().text().toLowerCase(Locale.ROOT), equalTo(value.toLowerCase(Locale.ROOT)));
}
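
Together these helpers let callers assert on a term query without caring whether a field-level boost wrapped it. A hypothetical invocation (field and value are made up; DEFAULT_BOOST takes the bare TermQuery path):

Query parsed = parseQuery("{\"term\": {\"user\": \"kimchy\"}}").toQuery(createShardContext());
assertTermOrBoostQuery(parsed, "user", "kimchy", AbstractQueryBuilder.DEFAULT_BOOST);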
/**
* Test serialization and deserialization of the test query.
*/
@@ -20,12 +20,15 @@
package org.elasticsearch.index.query;
import org.apache.lucene.search.*;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.hamcrest.Matchers;
import java.io.IOException;
import java.util.*;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.*;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
@@ -174,7 +177,8 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase<BoolQueryBuilde
// https://github.com/elasticsearch/elasticsearch/issues/7240
public void testEmptyBooleanQuery() throws Exception {
String query = jsonBuilder().startObject().startObject("bool").endObject().endObject().string();
XContentBuilder contentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
BytesReference query = contentBuilder.startObject().startObject("bool").endObject().endObject().bytes();
Query parsedQuery = parseQuery(query).toQuery(createShardContext());
assertThat(parsedQuery, Matchers.instanceOf(MatchAllDocsQuery.class));
}
@@ -20,21 +20,16 @@
package org.elasticsearch.index.query;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.*;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.*;
public class DisMaxQueryBuilderTests extends AbstractQueryTestCase<DisMaxQueryBuilder> {
/**
@@ -138,9 +133,13 @@ public class DisMaxQueryBuilderTests extends AbstractQueryTestCase<DisMaxQueryBu
List<Query> disjuncts = disjunctionMaxQuery.getDisjuncts();
assertThat(disjuncts.size(), equalTo(1));
PrefixQuery firstQ = (PrefixQuery) disjuncts.get(0);
assertThat(disjuncts.get(0), instanceOf(BoostQuery.class));
BoostQuery boostQuery = (BoostQuery) disjuncts.get(0);
assertThat((double) boostQuery.getBoost(), closeTo(1.2, 0.00001));
assertThat(boostQuery.getQuery(), instanceOf(PrefixQuery.class));
PrefixQuery firstQ = (PrefixQuery) boostQuery.getQuery();
// since age is automatically registered in data, we encode it as numeric
assertThat(firstQ.getPrefix(), equalTo(new Term(STRING_FIELD_NAME, "sh")));
assertThat((double) firstQ.getBoost(), closeTo(1.2, 0.00001));
}
}
@@ -20,6 +20,7 @@
package org.elasticsearch.index.query;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
@@ -116,12 +117,15 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase<FuzzyQueryBuil
" }\n" +
"}";
Query parsedQuery = parseQuery(query).toQuery(createShardContext());
assertThat(parsedQuery, instanceOf(FuzzyQuery.class));
FuzzyQuery fuzzyQuery = (FuzzyQuery) parsedQuery;
assertThat(parsedQuery, instanceOf(BoostQuery.class));
BoostQuery boostQuery = (BoostQuery) parsedQuery;
assertThat(boostQuery.getBoost(), equalTo(2.0f));
assertThat(boostQuery.getQuery(), instanceOf(FuzzyQuery.class));
FuzzyQuery fuzzyQuery = (FuzzyQuery) boostQuery.getQuery();
assertThat(fuzzyQuery.getTerm(), equalTo(new Term(STRING_FIELD_NAME, "sh")));
assertThat(fuzzyQuery.getMaxEdits(), equalTo(Fuzziness.AUTO.asDistance("sh")));
assertThat(fuzzyQuery.getPrefixLength(), equalTo(1));
assertThat(fuzzyQuery.getBoost(), equalTo(2.0f));
}
public void testToQueryWithNumericField() throws IOException {
@@ -130,8 +134,7 @@ public class FuzzyQueryBuilderTests extends AbstractQueryTestCase<FuzzyQueryBuil
" \"fuzzy\":{\n" +
" \"" + INT_FIELD_NAME + "\":{\n" +
" \"value\":12,\n" +
" \"fuzziness\":5,\n" +
" \"boost\":2.0\n" +
" \"fuzziness\":5\n" +
" }\n" +
" }\n" +
"}\n";
@@ -21,6 +21,7 @@ package org.elasticsearch.index.query;
import org.apache.lucene.search.GeoPointDistanceRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.SloppyMath;
import org.elasticsearch.Version;
import org.elasticsearch.common.geo.GeoDistance;
@@ -148,7 +149,11 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
if (queryBuilder.geoDistance() != null) {
fromValue = queryBuilder.geoDistance().normalize(fromValue, DistanceUnit.DEFAULT);
}
assertThat(geoQuery.minInclusiveDistance(), closeTo(fromValue, Math.abs(fromValue) / 1000));
double fromSlop = Math.abs(fromValue) / 1000;
if (queryBuilder.includeLower() == false) {
fromSlop = NumericUtils.sortableLongToDouble((NumericUtils.doubleToSortableLong(fromValue) + 1L));
}
assertThat(geoQuery.minInclusiveDistance(), closeTo(fromValue, fromSlop));
}
if (queryBuilder.to() != null && queryBuilder.to() instanceof Number) {
double toValue = ((Number) queryBuilder.to()).doubleValue();
@@ -158,7 +163,11 @@ public class GeoDistanceRangeQueryTests extends AbstractQueryTestCase<GeoDistanc
if (queryBuilder.geoDistance() != null) {
toValue = queryBuilder.geoDistance().normalize(toValue, DistanceUnit.DEFAULT);
}
assertThat(geoQuery.maxInclusiveDistance(), closeTo(toValue, Math.abs(toValue) / 1000));
double toSlop = Math.abs(toValue) / 1000;
if (queryBuilder.includeUpper() == false) {
toSlop = NumericUtils.sortableLongToDouble((NumericUtils.doubleToSortableLong(toValue) + 1L));
}
assertThat(geoQuery.maxInclusiveDistance(), closeTo(toValue, toSlop));
}
}
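
The widened tolerance for exclusive bounds relies on Lucene's sortable-long encoding of doubles being order-preserving and lossless: adding 1 to the sortable form yields the next representable double above the bound. A worked sketch, assuming NumericUtils from Lucene 5.x:

double from = 12.5;
long sortable = NumericUtils.doubleToSortableLong(from);
assert NumericUtils.sortableLongToDouble(sortable) == from;      // the round trip is exact
double next = NumericUtils.sortableLongToDouble(sortable + 1L);  // smallest double > from
assert next > from && next == Math.nextUp(from);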
@@ -56,15 +56,7 @@ public class IndicesQueryBuilderTests extends AbstractQueryTestCase<IndicesQuery
} else {
expected = queryBuilder.noMatchQuery().toQuery(context);
}
if (expected != null && queryBuilder.boost() != AbstractQueryBuilder.DEFAULT_BOOST) {
expected.setBoost(queryBuilder.boost());
}
assertEquals(query, expected);
}
@Override
protected void assertBoost(IndicesQueryBuilder queryBuilder, Query query) throws IOException {
//nothing to do here, boost check is already included in equality check done as part of doAssertLuceneQuery above
assertEquals(expected, query);
}
public void testIllegalArguments() {