Merge branch 'master' into feature/ingest
commit a9ecde041b

@@ -0,0 +1,10 @@
+# EditorConfig: http://editorconfig.org/
+
+root = true
+
+[*.java]
+charset = utf-8
+indent_style = space
+indent_size = 4
+trim_trailing_whitespace = true
+insert_final_newline = true

@@ -149,17 +149,23 @@ gradle test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix)

=== Load balancing and caches.

-By default, the tests run sequentially on a single forked JVM.
-
-To run with more forked JVMs than the default use:
+By default the tests run on up to 4 JVMs based on the number of cores. If you
+want to explicitly specify the number of JVMs you can do so on the command
+line:

----------------------------
gradle test -Dtests.jvms=8
----------------------------

-Don't count hypercores for CPU-intense tests and leave some slack
-for JVM-internal threads (like the garbage collector). Make sure there is
-enough RAM to handle child JVMs.
+Or in `~/.gradle/gradle.properties`:
+
+----------------------------
+systemProp.tests.jvms=8
+----------------------------
+
+It's difficult to pick the "right" number here. Hypercores don't count for CPU
+intensive tests and you should leave some slack for JVM-internal threads like
+the garbage collector. And you have to have enough RAM to handle each JVM.

=== Test compatibility.

@@ -280,11 +286,20 @@ The REST layer is tested through specific tests that are shared between all
the elasticsearch official clients and consist of YAML files that describe the
operations to be executed and the obtained results that need to be tested.

-The REST tests are run automatically when executing the maven test command. To run only the
+The REST tests are run automatically when executing the "gradle check" command. To run only the
REST tests use the following command:

---------------------------------------------------------------------------
-gradle integTest -Dtests.filter="@Rest"
+gradle :distribution:tar:integTest \
+  -Dtests.class=org.elasticsearch.test.rest.RestIT
---------------------------------------------------------------------------

+A specific test case can be run with
+
+---------------------------------------------------------------------------
+gradle :distribution:tar:integTest \
+  -Dtests.class=org.elasticsearch.test.rest.RestIT \
+  -Dtests.method="test {p0=cat.shards/10_basic/Help}"
+---------------------------------------------------------------------------
+
`RestNIT` are the executable test classes that run all the

@ -1,5 +1,6 @@
|
|||
package com.carrotsearch.gradle.junit4
|
||||
|
||||
import org.gradle.api.tasks.Input
|
||||
import org.gradle.util.ConfigureUtil
|
||||
|
||||
class TestLoggingConfiguration {
|
||||
|
@ -20,6 +21,10 @@ class TestLoggingConfiguration {
|
|||
SlowTestsConfiguration slowTests = new SlowTestsConfiguration()
|
||||
StackTraceFiltersConfiguration stackTraceFilters = new StackTraceFiltersConfiguration()
|
||||
|
||||
/** Summarize the first N failures at the end of the test. */
|
||||
@Input
|
||||
int showNumFailuresAtEnd = 3 // match TextReport default
|
||||
|
||||
void slowTests(Closure closure) {
|
||||
ConfigureUtil.configure(closure, slowTests)
|
||||
}
|
||||
|
@ -31,4 +36,8 @@ class TestLoggingConfiguration {
|
|||
void outputMode(String mode) {
|
||||
outputMode = mode.toUpperCase() as OutputMode
|
||||
}
|
||||
|
||||
void showNumFailuresAtEnd(int n) {
|
||||
showNumFailuresAtEnd = n
|
||||
}
|
||||
}
|
||||
|
|
|
@ -48,9 +48,6 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
|
|||
/** Format line for JVM ID string. */
|
||||
String jvmIdFormat
|
||||
|
||||
/** Summarize the first N failures at the end. */
|
||||
int showNumFailuresAtEnd = 3
|
||||
|
||||
/** Output stream that logs messages to the given logger */
|
||||
LoggingOutputStream outStream
|
||||
LoggingOutputStream errStream
|
||||
|
@ -110,13 +107,13 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
|
|||
|
||||
@Subscribe
|
||||
void onQuit(AggregatedQuitEvent e) throws IOException {
|
||||
if (showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) {
|
||||
if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) {
|
||||
List<Description> sublist = this.failedTests
|
||||
StringBuilder b = new StringBuilder()
|
||||
b.append('Tests with failures')
|
||||
if (sublist.size() > showNumFailuresAtEnd) {
|
||||
sublist = sublist.subList(0, showNumFailuresAtEnd)
|
||||
b.append(" (first " + showNumFailuresAtEnd + " out of " + failedTests.size() + ")")
|
||||
if (sublist.size() > config.showNumFailuresAtEnd) {
|
||||
sublist = sublist.subList(0, config.showNumFailuresAtEnd)
|
||||
b.append(" (first " + config.showNumFailuresAtEnd + " out of " + failedTests.size() + ")")
|
||||
}
|
||||
b.append(':\n')
|
||||
for (Description description : sublist) {
|
||||
|
|
|
@ -62,7 +62,7 @@ class BuildPlugin implements Plugin<Project> {
|
|||
configureCompile(project)
|
||||
|
||||
configureTest(project)
|
||||
PrecommitTasks.configure(project)
|
||||
configurePrecommit(project)
|
||||
}
|
||||
|
||||
/** Performs checks on the build environment and prints information about the build environment. */
|
||||
|
@ -283,6 +283,7 @@ class BuildPlugin implements Plugin<Project> {
|
|||
|
||||
/** Adds compiler settings to the project */
|
||||
static void configureCompile(Project project) {
|
||||
project.ext.compactProfile = 'compact3'
|
||||
project.afterEvaluate {
|
||||
// fail on all javac warnings
|
||||
project.tasks.withType(JavaCompile) {
|
||||
|
@ -295,6 +296,11 @@ class BuildPlugin implements Plugin<Project> {
|
|||
*/
|
||||
// don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
|
||||
options.compilerArgs << '-Werror' << '-Xlint:all,-path' << '-Xdoclint:all' << '-Xdoclint:-missing'
|
||||
// compile with compact 3 profile by default
|
||||
// NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
|
||||
if (project.compactProfile != 'full') {
|
||||
options.compilerArgs << '-profile' << project.compactProfile
|
||||
}
|
||||
options.encoding = 'UTF-8'
|
||||
}
|
||||
}
|
||||
|
@ -365,6 +371,7 @@ class BuildPlugin implements Plugin<Project> {
|
|||
enableSystemAssertions false
|
||||
|
||||
testLogging {
|
||||
showNumFailuresAtEnd 25
|
||||
slowTests {
|
||||
heartbeat 10
|
||||
summarySize 5
|
||||
|
@ -409,4 +416,11 @@ class BuildPlugin implements Plugin<Project> {
|
|||
}
|
||||
return test
|
||||
}
|
||||
|
||||
private static configurePrecommit(Project project) {
|
||||
Task precommit = PrecommitTasks.create(project, true)
|
||||
project.check.dependsOn(precommit)
|
||||
project.test.mustRunAfter(precommit)
|
||||
project.dependencyLicenses.dependencies = project.configurations.runtime - project.configurations.provided
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,63 +18,104 @@
|
|||
*/
|
||||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.InvalidUserDataException
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.Task
|
||||
import org.gradle.api.*
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.tasks.Input
|
||||
import org.gradle.api.tasks.InputDirectory
|
||||
import org.gradle.api.tasks.InputFiles
|
||||
import org.gradle.api.tasks.StopActionException
|
||||
import org.gradle.api.tasks.TaskAction
|
||||
import org.gradle.api.tasks.VerificationTask
|
||||
|
||||
import java.nio.file.Files
|
||||
import java.security.MessageDigest
|
||||
import java.util.regex.Matcher
|
||||
import java.util.regex.Pattern
|
||||
|
||||
class DependencyLicensesTask extends DefaultTask {
|
||||
static final String SHA_EXTENSION = '.sha1'
|
||||
|
||||
static Task configure(Project project, Closure closure) {
|
||||
DependencyLicensesTask task = project.tasks.create(type: DependencyLicensesTask, name: 'dependencyLicenses')
|
||||
UpdateShasTask update = project.tasks.create(type: UpdateShasTask, name: 'updateShas')
|
||||
update.parentTask = task
|
||||
task.configure(closure)
|
||||
project.check.dependsOn(task)
|
||||
return task
|
||||
}
|
||||
/**
|
||||
* A task to check licenses for dependencies.
|
||||
*
|
||||
* There are two parts to the check:
|
||||
* <ul>
|
||||
* <li>LICENSE and NOTICE files</li>
|
||||
* <li>SHA checksums for each dependency jar</li>
|
||||
* </ul>
|
||||
*
|
||||
* The directory to find the license and sha files in defaults to the dir {@code licenses}
|
||||
* in the project directory for this task. You can override this directory:
|
||||
* <pre>
|
||||
* dependencyLicenses {
|
||||
* licensesDir = project.file('mybetterlicensedir')
|
||||
* }
|
||||
* </pre>
|
||||
*
|
||||
* The jar files to check default to the dependencies from the default configuration. You
|
||||
* can override this, for example, to only check compile dependencies:
|
||||
* <pre>
|
||||
* dependencyLicenses {
|
||||
* dependencies = project.configurations.compile
|
||||
* }
|
||||
* </pre>
|
||||
*
|
||||
* Every jar must have a {@code .sha1} file in the licenses dir. These can be managed
|
||||
* automatically using the {@code updateShas} helper task that is created along
|
||||
* with this task. It will add {@code .sha1} files for new jars that are in dependencies
|
||||
* and remove old {@code .sha1} files that are no longer needed.
|
||||
*
|
||||
* Every jar must also have a LICENSE and NOTICE file. However, multiple jars can share
|
||||
* LICENSE and NOTICE files by mapping a pattern to the same name.
|
||||
* <pre>
|
||||
* dependencyLicenses {
|
||||
* mapping from: /lucene-.*/, to: 'lucene'
|
||||
* }
|
||||
* </pre>
|
||||
*/
|
||||
public class DependencyLicensesTask extends DefaultTask {
|
||||
private static final String SHA_EXTENSION = '.sha1'
|
||||
|
||||
// TODO: we should be able to default this to eg compile deps, but we need to move the licenses
|
||||
// check from distribution to core (ie this should only be run on java projects)
|
||||
/** A collection of jar files that should be checked. */
|
||||
@InputFiles
|
||||
FileCollection dependencies
|
||||
public FileCollection dependencies
|
||||
|
||||
/** The directory to find the license and sha files in. */
|
||||
@InputDirectory
|
||||
File licensesDir = new File(project.projectDir, 'licenses')
|
||||
public File licensesDir = new File(project.projectDir, 'licenses')
|
||||
|
||||
LinkedHashMap<String, String> mappings = new LinkedHashMap<>()
|
||||
/** A map of patterns to prefix, used to find the LICENSE and NOTICE file. */
|
||||
private LinkedHashMap<String, String> mappings = new LinkedHashMap<>()
|
||||
|
||||
/**
|
||||
* Add a mapping from a regex pattern for the jar name, to a prefix to find
|
||||
* the LICENSE and NOTICE file for that jar.
|
||||
*/
|
||||
@Input
|
||||
void mapping(Map<String, String> props) {
|
||||
String from = props.get('from')
|
||||
public void mapping(Map<String, String> props) {
|
||||
String from = props.remove('from')
|
||||
if (from == null) {
|
||||
throw new InvalidUserDataException('Missing "from" setting for license name mapping')
|
||||
}
|
||||
String to = props.get('to')
|
||||
String to = props.remove('to')
|
||||
if (to == null) {
|
||||
throw new InvalidUserDataException('Missing "to" setting for license name mapping')
|
||||
}
|
||||
if (props.isEmpty() == false) {
|
||||
throw new InvalidUserDataException("Unknown properties for mapping on dependencyLicenses: ${props.keySet()}")
|
||||
}
|
||||
mappings.put(from, to)
|
||||
}
|
||||
|
||||
@TaskAction
|
||||
void checkDependencies() {
|
||||
// TODO: empty license dir (or error when dir exists and no deps)
|
||||
public void checkDependencies() {
|
||||
// TODO REMOVE THIS DIRTY FIX FOR #15168
|
||||
if (licensesDir.exists() == false) {
|
||||
return
|
||||
}
|
||||
if (licensesDir.exists() == false && dependencies.isEmpty() == false) {
|
||||
throw new GradleException("Licenses dir ${licensesDir} does not exist, but there are dependencies")
|
||||
}
|
||||
if (licensesDir.exists() && dependencies.isEmpty()) {
|
||||
throw new GradleException("Licenses dir ${licensesDir} exists, but there are no dependencies")
|
||||
}
|
||||
|
||||
// order is the same for keys and values iteration since we use a linked hashmap
|
||||
List<String> mapped = new ArrayList<>(mappings.values())
|
||||
|
@ -127,7 +168,7 @@ class DependencyLicensesTask extends DefaultTask {
|
|||
}
|
||||
}
|
||||
|
||||
void checkSha(File jar, String jarName, Set<File> shaFiles) {
|
||||
private void checkSha(File jar, String jarName, Set<File> shaFiles) {
|
||||
File shaFile = new File(licensesDir, jarName + SHA_EXTENSION)
|
||||
if (shaFile.exists() == false) {
|
||||
throw new GradleException("Missing SHA for ${jarName}. Run 'gradle updateShas' to create")
|
||||
|
@ -143,7 +184,7 @@ class DependencyLicensesTask extends DefaultTask {
|
|||
shaFiles.remove(shaFile)
|
||||
}
|
||||
|
||||
void checkFile(String name, String jarName, Map<String, Integer> counters, String type) {
|
||||
private void checkFile(String name, String jarName, Map<String, Integer> counters, String type) {
|
||||
String fileName = "${name}-${type}"
|
||||
Integer count = counters.get(fileName)
|
||||
if (count == null) {
|
||||
|
@ -158,10 +199,12 @@ class DependencyLicensesTask extends DefaultTask {
|
|||
counters.put(fileName, count + 1)
|
||||
}
|
||||
|
||||
static class UpdateShasTask extends DefaultTask {
|
||||
DependencyLicensesTask parentTask
|
||||
/** A helper task to update the sha files in the license dir. */
|
||||
public static class UpdateShasTask extends DefaultTask {
|
||||
private DependencyLicensesTask parentTask
|
||||
|
||||
@TaskAction
|
||||
void updateShas() {
|
||||
public void updateShas() {
|
||||
Set<File> shaFiles = new HashSet<File>()
|
||||
parentTask.licensesDir.eachFile {
|
||||
String name = it.getName()
|
||||
|
|
|
@ -19,10 +19,11 @@
|
|||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.InvalidUserDataException
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.tasks.InputFiles
|
||||
import org.gradle.api.tasks.OutputFile
|
||||
import org.gradle.api.tasks.OutputFiles
|
||||
import org.gradle.api.tasks.SourceSet
|
||||
import org.gradle.api.tasks.TaskAction
|
||||
import org.gradle.api.tasks.util.PatternFilterable
|
||||
|
@ -33,14 +34,19 @@ import java.util.regex.Pattern
|
|||
/**
|
||||
* Checks for patterns in source files for the project which are forbidden.
|
||||
*/
|
||||
class ForbiddenPatternsTask extends DefaultTask {
|
||||
Map<String,String> patterns = new LinkedHashMap<>()
|
||||
PatternFilterable filesFilter = new PatternSet()
|
||||
public class ForbiddenPatternsTask extends DefaultTask {
|
||||
|
||||
/** The rules: a map from the rule name, to a rule regex pattern. */
|
||||
private Map<String,String> patterns = new LinkedHashMap<>()
|
||||
/** A pattern set of which files should be checked. */
|
||||
private PatternFilterable filesFilter = new PatternSet()
|
||||
|
||||
@OutputFile
|
||||
File outputMarker = new File(project.buildDir, "markers/forbiddenPatterns")
|
||||
|
||||
ForbiddenPatternsTask() {
|
||||
public ForbiddenPatternsTask() {
|
||||
description = 'Checks source files for invalid patterns like nocommits or tabs'
|
||||
|
||||
// we always include all source files, and exclude what should not be checked
|
||||
filesFilter.include('**')
|
||||
// exclude known binary extensions
|
||||
|
@ -52,23 +58,28 @@ class ForbiddenPatternsTask extends DefaultTask {
|
|||
filesFilter.exclude('**/*.crt')
|
||||
filesFilter.exclude('**/*.png')
|
||||
|
||||
// TODO: add compile and test compile outputs as this task's outputs, so we don't rerun when source files haven't changed
|
||||
// add mandatory rules
|
||||
patterns.put('nocommit', /nocommit/)
|
||||
patterns.put('tab', /\t/)
|
||||
}
|
||||
|
||||
/** Adds a file glob pattern to be excluded */
|
||||
void exclude(String... excludes) {
|
||||
public void exclude(String... excludes) {
|
||||
this.filesFilter.exclude(excludes)
|
||||
}
|
||||
|
||||
/** Adds pattern to forbid */
|
||||
/** Adds a pattern to forbid. */
|
||||
void rule(Map<String,String> props) {
|
||||
String name = props.get('name')
|
||||
String name = props.remove('name')
|
||||
if (name == null) {
|
||||
throw new IllegalArgumentException('Missing [name] for invalid pattern rule')
|
||||
throw new InvalidUserDataException('Missing [name] for invalid pattern rule')
|
||||
}
|
||||
String pattern = props.get('pattern')
|
||||
String pattern = props.remove('pattern')
|
||||
if (pattern == null) {
|
||||
throw new IllegalArgumentException('Missing [pattern] for invalid pattern rule')
|
||||
throw new InvalidUserDataException('Missing [pattern] for invalid pattern rule')
|
||||
}
|
||||
if (props.isEmpty() == false) {
|
||||
throw new InvalidUserDataException("Unknown arguments for ForbiddenPatterns rule mapping: ${props.keySet()}")
|
||||
}
|
||||
// TODO: fail if pattern contains a newline, it won't work (currently)
|
||||
patterns.put(name, pattern)
|
||||
|
@ -89,14 +100,14 @@ class ForbiddenPatternsTask extends DefaultTask {
|
|||
Pattern allPatterns = Pattern.compile('(' + patterns.values().join(')|(') + ')')
|
||||
List<String> failures = new ArrayList<>()
|
||||
for (File f : files()) {
|
||||
f.eachLine('UTF-8') { line, lineNumber ->
|
||||
f.eachLine('UTF-8') { String line, int lineNumber ->
|
||||
if (allPatterns.matcher(line).find()) {
|
||||
addErrorMessages(failures, f, (String)line, (int)lineNumber)
|
||||
addErrorMessages(failures, f, line, lineNumber)
|
||||
}
|
||||
}
|
||||
}
|
||||
if (failures.isEmpty() == false) {
|
||||
throw new IllegalArgumentException('Found invalid patterns:\n' + failures.join('\n'))
|
||||
throw new GradleException('Found invalid patterns:\n' + failures.join('\n'))
|
||||
}
|
||||
outputMarker.setText('done', 'UTF-8')
|
||||
}
|
||||
|
|
|
@ -0,0 +1,62 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import org.elasticsearch.gradle.LoggedExec
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.tasks.InputFile
|
||||
import org.gradle.api.tasks.OutputFile
|
||||
|
||||
/**
|
||||
* Runs CheckJarHell on a classpath.
|
||||
*/
|
||||
public class JarHellTask extends LoggedExec {
|
||||
|
||||
/**
|
||||
* We use a simple "marker" file that we touch when the task succeeds
|
||||
* as the task output. This is compared against the modified time of the
|
||||
* inputs (ie the jars/class files).
|
||||
*/
|
||||
@OutputFile
|
||||
public File successMarker = new File(project.buildDir, 'markers/jarHell')
|
||||
|
||||
/** The classpath to run jarhell check on, defaults to the test runtime classpath */
|
||||
@InputFile
|
||||
public FileCollection classpath = project.sourceSets.test.runtimeClasspath
|
||||
|
||||
public JarHellTask() {
|
||||
project.afterEvaluate {
|
||||
dependsOn(classpath)
|
||||
description = "Runs CheckJarHell on ${classpath}"
|
||||
executable = new File(project.javaHome, 'bin/java')
|
||||
doFirst({
|
||||
/* JarHell doesn't like getting directories that don't exist but
|
||||
gradle isn't especially careful about that. So we have to
|
||||
filter it ourselves. */
|
||||
FileCollection taskClasspath = classpath.filter { it.exists() }
|
||||
args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell')
|
||||
})
|
||||
doLast({
|
||||
successMarker.parentFile.mkdirs()
|
||||
successMarker.setText("", 'UTF-8')
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
|
@ -18,16 +18,10 @@
|
|||
*/
|
||||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
|
||||
import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApisExtension
|
||||
import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.Task
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.plugins.JavaBasePlugin
|
||||
import org.gradle.api.tasks.Exec
|
||||
import org.gradle.api.tasks.TaskContainer
|
||||
|
||||
/**
|
||||
* Validation tasks which should be run before committing. These run before tests.
|
||||
|
@ -35,36 +29,34 @@ import org.gradle.api.tasks.TaskContainer
|
|||
class PrecommitTasks {
|
||||
|
||||
/** Adds a precommit task, which depends on non-test verification tasks. */
|
||||
static void configure(Project project) {
|
||||
List precommitTasks = [
|
||||
configureForbiddenApis(project),
|
||||
configureForbiddenPatterns(project.tasks),
|
||||
configureJarHell(project)]
|
||||
public static Task create(Project project, boolean includeDependencyLicenses) {
|
||||
|
||||
Map precommitOptions = [
|
||||
name: 'precommit',
|
||||
group: JavaBasePlugin.VERIFICATION_GROUP,
|
||||
description: 'Runs all non-test checks.',
|
||||
dependsOn: precommitTasks
|
||||
]
|
||||
Task precommit = project.tasks.create(precommitOptions)
|
||||
project.check.dependsOn(precommit)
|
||||
List<Task> precommitTasks = [
|
||||
configureForbiddenApis(project),
|
||||
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
|
||||
project.tasks.create('jarHell', JarHellTask.class)]
|
||||
|
||||
// delay ordering relative to test tasks, since they may not be setup yet
|
||||
project.afterEvaluate {
|
||||
Task test = project.tasks.findByName('test')
|
||||
if (test != null) {
|
||||
test.mustRunAfter(precommit)
|
||||
}
|
||||
Task integTest = project.tasks.findByName('integTest')
|
||||
if (integTest != null) {
|
||||
integTest.mustRunAfter(precommit)
|
||||
}
|
||||
// tasks with just tests don't need dependency licenses, so this flag makes adding
|
||||
// the task optional
|
||||
if (includeDependencyLicenses) {
|
||||
DependencyLicensesTask dependencyLicenses = project.tasks.create('dependencyLicenses', DependencyLicensesTask.class)
|
||||
precommitTasks.add(dependencyLicenses)
|
||||
// we also create the updateShas helper task that is associated with dependencyLicenses
|
||||
UpdateShasTask updateShas = project.tasks.create('updateShas', UpdateShasTask.class)
|
||||
updateShas.parentTask = dependencyLicenses
|
||||
}
|
||||
|
||||
Map<String, Object> precommitOptions = [
|
||||
name: 'precommit',
|
||||
group: JavaBasePlugin.VERIFICATION_GROUP,
|
||||
description: 'Runs all non-test checks.',
|
||||
dependsOn: precommitTasks
|
||||
]
|
||||
return project.tasks.create(precommitOptions)
|
||||
}
|
||||
|
||||
static Task configureForbiddenApis(Project project) {
|
||||
project.pluginManager.apply('de.thetaphi.forbiddenapis')
|
||||
private static Task configureForbiddenApis(Project project) {
|
||||
project.pluginManager.apply(ForbiddenApisPlugin.class)
|
||||
project.forbiddenApis {
|
||||
internalRuntimeForbidden = true
|
||||
failOnUnsupportedJava = false
|
||||
|
@ -75,7 +67,7 @@ class PrecommitTasks {
|
|||
Task mainForbidden = project.tasks.findByName('forbiddenApisMain')
|
||||
if (mainForbidden != null) {
|
||||
mainForbidden.configure {
|
||||
bundledSignatures += ['jdk-system-out']
|
||||
bundledSignatures += 'jdk-system-out'
|
||||
signaturesURLs += [
|
||||
getClass().getResource('/forbidden/core-signatures.txt'),
|
||||
getClass().getResource('/forbidden/third-party-signatures.txt')]
|
||||
|
@ -84,63 +76,11 @@ class PrecommitTasks {
|
|||
Task testForbidden = project.tasks.findByName('forbiddenApisTest')
|
||||
if (testForbidden != null) {
|
||||
testForbidden.configure {
|
||||
signaturesURLs += [getClass().getResource('/forbidden/test-signatures.txt')]
|
||||
signaturesURLs += getClass().getResource('/forbidden/test-signatures.txt')
|
||||
}
|
||||
}
|
||||
Task forbiddenApis = project.tasks.findByName('forbiddenApis')
|
||||
forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
|
||||
return forbiddenApis
|
||||
}
|
||||
|
||||
static Task configureForbiddenPatterns(TaskContainer tasks) {
|
||||
Map options = [
|
||||
name: 'forbiddenPatterns',
|
||||
type: ForbiddenPatternsTask,
|
||||
description: 'Checks source files for invalid patterns like nocommits or tabs',
|
||||
]
|
||||
return tasks.create(options) {
|
||||
rule name: 'nocommit', pattern: /nocommit/
|
||||
rule name: 'tab', pattern: /\t/
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a task to run jar hell before on the test classpath.
|
||||
*
|
||||
* We use a simple "marker" file that we touch when the task succeeds
|
||||
* as the task output. This is compared against the modified time of the
|
||||
* inputs (ie the jars/class files).
|
||||
*/
|
||||
static Task configureJarHell(Project project) {
|
||||
File successMarker = new File(project.buildDir, 'markers/jarHell')
|
||||
Exec task = project.tasks.create(name: 'jarHell', type: Exec)
|
||||
FileCollection testClasspath = project.sourceSets.test.runtimeClasspath
|
||||
task.dependsOn(testClasspath)
|
||||
task.inputs.files(testClasspath)
|
||||
task.outputs.file(successMarker)
|
||||
task.executable = new File(project.javaHome, 'bin/java')
|
||||
task.doFirst({
|
||||
/* JarHell doesn't like getting directories that don't exist but
|
||||
gradle isn't especially careful about that. So we have to do it
|
||||
filter it ourselves. */
|
||||
def taskClasspath = testClasspath.filter { it.exists() }
|
||||
task.args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell')
|
||||
})
|
||||
if (task.logger.isInfoEnabled() == false) {
|
||||
task.standardOutput = new ByteArrayOutputStream()
|
||||
task.errorOutput = task.standardOutput
|
||||
task.ignoreExitValue = true
|
||||
task.doLast({
|
||||
if (execResult.exitValue != 0) {
|
||||
logger.error(standardOutput.toString())
|
||||
throw new GradleException("JarHell failed")
|
||||
}
|
||||
})
|
||||
}
|
||||
task.doLast({
|
||||
successMarker.parentFile.mkdirs()
|
||||
successMarker.setText("", 'UTF-8')
|
||||
})
|
||||
return task
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,65 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.gradle.precommit
|
||||
|
||||
import org.gradle.api.DefaultTask
|
||||
import org.gradle.api.tasks.TaskAction
|
||||
|
||||
import java.nio.file.Files
|
||||
import java.security.MessageDigest
|
||||
|
||||
/**
|
||||
* A task to update shas used by {@code DependencyLicensesTask}
|
||||
*/
|
||||
public class UpdateShasTask extends DefaultTask {
|
||||
|
||||
/** The parent dependency licenses task to use configuration from */
|
||||
public DependencyLicensesTask parentTask
|
||||
|
||||
public UpdateShasTask() {
|
||||
description = 'Updates the sha files for the dependencyLicenses check'
|
||||
}
|
||||
|
||||
@TaskAction
|
||||
public void updateShas() {
|
||||
Set<File> shaFiles = new HashSet<File>()
|
||||
parentTask.licensesDir.eachFile {
|
||||
String name = it.getName()
|
||||
if (name.endsWith(SHA_EXTENSION)) {
|
||||
shaFiles.add(it)
|
||||
}
|
||||
}
|
||||
for (File dependency : parentTask.dependencies) {
|
||||
String jarName = dependency.getName()
|
||||
File shaFile = new File(parentTask.licensesDir, jarName + SHA_EXTENSION)
|
||||
if (shaFile.exists() == false) {
|
||||
logger.lifecycle("Adding sha for ${jarName}")
|
||||
String sha = MessageDigest.getInstance("SHA-1").digest(dependency.getBytes()).encodeHex().toString()
|
||||
shaFile.setText(sha, 'UTF-8')
|
||||
} else {
|
||||
shaFiles.remove(shaFile)
|
||||
}
|
||||
}
|
||||
shaFiles.each { shaFile ->
|
||||
logger.lifecycle("Removing unused sha ${shaFile.getName()}")
|
||||
Files.delete(shaFile.toPath())
|
||||
}
|
||||
}
|
||||
}
|
|
@ -58,6 +58,7 @@ class RestIntegTestTask extends RandomizedTestingTask {
|
|||
integTest.testClassesDir = test.testClassesDir
|
||||
integTest.mustRunAfter(test)
|
||||
}
|
||||
integTest.mustRunAfter(project.precommit)
|
||||
project.check.dependsOn(integTest)
|
||||
RestSpecHack.configureDependencies(project)
|
||||
project.afterEvaluate {
|
||||
|
|
|
@ -56,6 +56,7 @@ class StandaloneTestBasePlugin implements Plugin<Project> {
|
|||
plusConfigurations = [project.configurations.testRuntime]
|
||||
}
|
||||
}
|
||||
PrecommitTasks.configure(project)
|
||||
PrecommitTasks.create(project, false)
|
||||
project.check.dependsOn(project.precommit)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -45,6 +45,7 @@ class StandaloneTestPlugin implements Plugin<Project> {
|
|||
classpath = project.sourceSets.test.runtimeClasspath
|
||||
testClassesDir project.sourceSets.test.output.classesDir
|
||||
}
|
||||
test.mustRunAfter(project.precommit)
|
||||
project.check.dependsOn(test)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -117,6 +117,9 @@ forbiddenPatterns {
|
|||
exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
|
||||
}
|
||||
|
||||
// dependency license are currently checked in distribution
|
||||
dependencyLicenses.enabled = false
|
||||
|
||||
if (isEclipse == false || project.path == ":core-tests") {
|
||||
task integTest(type: RandomizedTestingTask,
|
||||
group: JavaBasePlugin.VERIFICATION_GROUP,
|
||||
|
|
|
@ -74,7 +74,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
|
|||
protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener<ClusterHealthResponse> listener) {
|
||||
if (request.waitForEvents() != null) {
|
||||
final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis();
|
||||
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
return currentState;
|
||||
|
|
|
@ -68,7 +68,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
|
|||
|
||||
@Override
|
||||
protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener<ClusterRerouteResponse> listener) {
|
||||
clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterRerouteResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("cluster_reroute (api)", new AckedClusterStateUpdateTask<ClusterRerouteResponse>(Priority.IMMEDIATE, request, listener) {
|
||||
|
||||
private volatile ClusterState clusterStateToSend;
|
||||
private volatile RoutingExplanations explanations;
|
||||
|
|
|
@ -91,7 +91,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
|
|||
final Settings.Builder transientUpdates = Settings.settingsBuilder();
|
||||
final Settings.Builder persistentUpdates = Settings.settingsBuilder();
|
||||
|
||||
clusterService.submitStateUpdateTask("cluster_update_settings", Priority.IMMEDIATE, new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("cluster_update_settings",
|
||||
new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.IMMEDIATE, request, listener) {
|
||||
|
||||
private volatile boolean changed = false;
|
||||
|
||||
|
@ -132,7 +133,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
|
|||
// in the components (e.g. FilterAllocationDecider), so the changes made by the first call aren't visible
|
||||
// to the components until the ClusterStateListener instances have been invoked, but are visible after
|
||||
// the first update task has been completed.
|
||||
clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("reroute_after_cluster_update_settings",
|
||||
new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.URGENT, request, listener) {
|
||||
|
||||
@Override
|
||||
public boolean mustAck(DiscoveryNode discoveryNode) {
|
||||
|
|
|
@ -46,9 +46,10 @@ import java.util.List;
|
|||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
/**
|
||||
* A bulk request holds an ordered {@link IndexRequest}s and {@link DeleteRequest}s and allows to executes
|
||||
* it in a single batch.
|
||||
* A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s
|
||||
* and allows executing them in a single batch.
|
||||
*
|
||||
* Note that we only support refresh on the bulk request not per item.
|
||||
* @see org.elasticsearch.client.Client#bulk(BulkRequest)
|
||||
*/
|
||||
public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest {
|
||||
|
@ -89,6 +90,12 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
|||
return add(request, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a request to the current BulkRequest.
|
||||
* @param request Request to add
|
||||
* @param payload Optional payload
|
||||
* @return the current bulk request
|
||||
*/
|
||||
public BulkRequest add(ActionRequest request, @Nullable Object payload) {
|
||||
if (request instanceof IndexRequest) {
|
||||
add((IndexRequest) request, payload);
|
||||
|
@ -127,7 +134,8 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
|||
BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) {
|
||||
requests.add(request);
|
||||
addPayload(payload);
|
||||
sizeInBytes += request.source().length() + REQUEST_OVERHEAD;
|
||||
// lack of source is validated in validate() method
|
||||
sizeInBytes += (request.source() != null ? request.source().length() : 0) + REQUEST_OVERHEAD;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -478,8 +486,14 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
|
|||
if (requests.isEmpty()) {
|
||||
validationException = addValidationError("no requests added", validationException);
|
||||
}
|
||||
for (int i = 0; i < requests.size(); i++) {
|
||||
ActionRequestValidationException ex = requests.get(i).validate();
|
||||
for (ActionRequest request : requests) {
|
||||
// We first check if refresh has been set
|
||||
if ((request instanceof DeleteRequest && ((DeleteRequest)request).refresh()) ||
|
||||
(request instanceof UpdateRequest && ((UpdateRequest)request).refresh()) ||
|
||||
(request instanceof IndexRequest && ((IndexRequest)request).refresh())) {
|
||||
validationException = addValidationError("Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.", validationException);
|
||||
}
|
||||
ActionRequestValidationException ex = request.validate();
|
||||
if (ex != null) {
|
||||
if (validationException == null) {
|
||||
validationException = new ActionRequestValidationException();
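
For illustration of the new validation rule above, here is a hedged sketch (not from this change; the index, type, and id values are made up, and it assumes the existing BulkRequest refresh(boolean) setter): the refresh flag belongs on the bulk request, not on an individual item.

---------------------------------------------------------------------------
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;

// Illustrative class, not part of this change.
class BulkRefreshExample {
    static BulkRequest build() {
        BulkRequest bulk = new BulkRequest();
        bulk.add(new IndexRequest("my-index", "my-type", "1").source("{\"field\":1}"));
        // Setting refresh on the IndexRequest above instead would now fail validate()
        // with "Refresh is not supported on an item request ...".
        bulk.refresh(true); // assumption: refresh is set on the bulk request itself
        return bulk;
    }
}
---------------------------------------------------------------------------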
|
||||
|
|
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
public interface AckedClusterStateTaskListener extends ClusterStateTaskListener {
|
||||
|
||||
/**
|
||||
* Called to determine which nodes the acknowledgement is expected from
|
||||
*
|
||||
* @param discoveryNode a node
|
||||
* @return true if the node is expected to send ack back, false otherwise
|
||||
*/
|
||||
boolean mustAck(DiscoveryNode discoveryNode);
|
||||
|
||||
/**
|
||||
* Called once all the nodes have acknowledged the cluster state update request. Must be
|
||||
* very lightweight execution, since it gets executed on the cluster service thread.
|
||||
*
|
||||
* @param t optional error that might have been thrown
|
||||
*/
|
||||
void onAllNodesAcked(@Nullable Throwable t);
|
||||
|
||||
/**
|
||||
* Called once the acknowledgement timeout defined by
|
||||
* {@link AckedClusterStateUpdateTask#ackTimeout()} has expired
|
||||
*/
|
||||
void onAckTimeout();
|
||||
|
||||
/**
|
||||
* Acknowledgement timeout, maximum time interval to wait for acknowledgements
|
||||
*/
|
||||
TimeValue ackTimeout();
|
||||
|
||||
}
|
|
@ -22,18 +22,24 @@ import org.elasticsearch.action.ActionListener;
|
|||
import org.elasticsearch.cluster.ack.AckedRequest;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
/**
|
||||
* An extension interface to {@link ClusterStateUpdateTask} that allows to be notified when
|
||||
* all the nodes have acknowledged a cluster state update request
|
||||
*/
|
||||
public abstract class AckedClusterStateUpdateTask<Response> extends ClusterStateUpdateTask {
|
||||
public abstract class AckedClusterStateUpdateTask<Response> extends ClusterStateUpdateTask implements AckedClusterStateTaskListener {
|
||||
|
||||
private final ActionListener<Response> listener;
|
||||
private final AckedRequest request;
|
||||
|
||||
protected AckedClusterStateUpdateTask(AckedRequest request, ActionListener<Response> listener) {
|
||||
this(Priority.NORMAL, request, listener);
|
||||
}
|
||||
|
||||
protected AckedClusterStateUpdateTask(Priority priority, AckedRequest request, ActionListener<Response> listener) {
|
||||
super(priority);
|
||||
this.listener = listener;
|
||||
this.request = request;
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.cluster.routing.OperationRouting;
|
||||
import org.elasticsearch.cluster.service.PendingClusterTask;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
|
@ -101,12 +100,35 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
|
|||
void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener);
|
||||
|
||||
/**
|
||||
* Submits a task that will update the cluster state.
|
||||
* Submits a cluster state update task; submitted updates will be
|
||||
* batched across the same instance of executor. The exact batching
|
||||
* semantics depend on the underlying implementation but a rough
|
||||
* guideline is that if the update task is submitted while there
|
||||
* are pending update tasks for the same executor, these update
|
||||
* tasks will all be executed on the executor in a single batch
|
||||
*
|
||||
* @param source the source of the cluster state update task
|
||||
* @param task the state needed for the cluster state update task
|
||||
* @param config the cluster state update task configuration
|
||||
* @param executor the cluster state update task executor; tasks
|
||||
* that share the same executor will be executed
|
||||
* in batches on this executor
|
||||
* @param listener callback after the cluster state update task
|
||||
* completes
|
||||
* @param <T> the type of the cluster state update task state
|
||||
*/
|
||||
void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask);
|
||||
<T> void submitStateUpdateTask(final String source, final T task,
|
||||
final ClusterStateTaskConfig config,
|
||||
final ClusterStateTaskExecutor<T> executor,
|
||||
final ClusterStateTaskListener listener);
|
||||
|
||||
/**
|
||||
* Submits a task that will update the cluster state (the task has a default priority of {@link Priority#NORMAL}).
|
||||
* Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener)},
|
||||
* submitted updates will not be batched.
|
||||
*
|
||||
* @param source the source of the cluster state update task
|
||||
* @param updateTask the full context for the cluster state update
|
||||
* task
|
||||
*/
|
||||
void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask);
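
To make the new batched signature concrete, here is a hedged caller-side sketch (not part of the change; the class name, source string, and task value are invented, and an injected ClusterService is assumed) using only the types introduced in this commit:

---------------------------------------------------------------------------
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.common.Priority;

// Illustrative class, not part of this change.
public class BatchedSubmitExample {

    public static void submit(ClusterService clusterService) {
        // Tasks submitted against the same executor instance may be folded into one batch.
        ClusterStateTaskExecutor<String> executor = (currentState, tasks) ->
                ClusterStateTaskExecutor.BatchResult.<String>builder()
                        .successes(tasks)
                        .build(currentState); // same instance => nothing to publish

        clusterService.submitStateUpdateTask(
                "example-source [demo]",                       // shows up in logs and pending tasks
                "demo-task",                                   // the per-task state (any T)
                ClusterStateTaskConfig.build(Priority.NORMAL), // priority, no timeout
                executor,
                new ClusterStateTaskListener() {
                    @Override
                    public void onFailure(String source, Throwable t) {
                        // invoked on timeout, rejection, or when execute() throws
                    }
                });
    }
}
---------------------------------------------------------------------------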
|
||||
|
||||
|
|
|
@ -0,0 +1,92 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
/**
|
||||
* Cluster state update task configuration for timeout and priority
|
||||
*/
|
||||
public interface ClusterStateTaskConfig {
|
||||
/**
|
||||
* The timeout for this cluster state update task configuration. If
|
||||
* the cluster state update task isn't processed within this
|
||||
* timeout, the associated {@link ClusterStateTaskListener#onFailure(String, Throwable)}
|
||||
* is invoked.
|
||||
*
|
||||
* @return the timeout, or null if one is not set
|
||||
*/
|
||||
@Nullable
|
||||
TimeValue timeout();
|
||||
|
||||
/**
|
||||
* The {@link Priority} for this cluster state update task configuration.
|
||||
*
|
||||
* @return the priority
|
||||
*/
|
||||
Priority priority();
|
||||
|
||||
/**
|
||||
* Build a cluster state update task configuration with the
|
||||
* specified {@link Priority} and no timeout.
|
||||
*
|
||||
* @param priority the priority for the associated cluster state
|
||||
* update task
|
||||
* @return the resulting cluster state update task configuration
|
||||
*/
|
||||
static ClusterStateTaskConfig build(Priority priority) {
|
||||
return new Basic(priority, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a cluster state update task configuration with the
|
||||
* specified {@link Priority} and timeout.
|
||||
*
|
||||
* @param priority the priority for the associated cluster state
|
||||
* update task
|
||||
* @param timeout the timeout for the associated cluster state
|
||||
* update task
|
||||
* @return the resulting cluster state update task configuration
|
||||
*/
|
||||
static ClusterStateTaskConfig build(Priority priority, TimeValue timeout) {
|
||||
return new Basic(priority, timeout);
|
||||
}
|
||||
|
||||
class Basic implements ClusterStateTaskConfig {
|
||||
final TimeValue timeout;
|
||||
final Priority priority;
|
||||
|
||||
public Basic(Priority priority, TimeValue timeout) {
|
||||
this.timeout = timeout;
|
||||
this.priority = priority;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
return timeout;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Priority priority() {
|
||||
return priority;
|
||||
}
|
||||
}
|
||||
}
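
As a brief, hypothetical usage sketch of the factory methods defined above (the 30 second timeout is an arbitrary example value):

---------------------------------------------------------------------------
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue;

// Illustrative wrapper class, not part of this change.
class TaskConfigExamples {
    // Priority only: the task never fails by expiry because no timeout is set.
    static final ClusterStateTaskConfig URGENT =
            ClusterStateTaskConfig.build(Priority.URGENT);

    // Priority plus a timeout after which ClusterStateTaskListener#onFailure fires.
    static final ClusterStateTaskConfig URGENT_WITH_TIMEOUT =
            ClusterStateTaskConfig.build(Priority.URGENT, TimeValue.timeValueSeconds(30));
}
---------------------------------------------------------------------------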
|
|
@ -0,0 +1,132 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
public interface ClusterStateTaskExecutor<T> {
|
||||
/**
|
||||
* Update the cluster state based on the current state and the given tasks. Return the *same instance* if no state
|
||||
* should be changed.
|
||||
*/
|
||||
BatchResult<T> execute(ClusterState currentState, List<T> tasks) throws Exception;
|
||||
|
||||
/**
|
||||
* indicates whether this task should only run if current node is master
|
||||
*/
|
||||
default boolean runOnlyOnMaster() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents the result of a batched execution of cluster state update tasks
|
||||
* @param <T> the type of the cluster state update task
|
||||
*/
|
||||
class BatchResult<T> {
|
||||
final public ClusterState resultingState;
|
||||
final public Map<T, TaskResult> executionResults;
|
||||
|
||||
/**
|
||||
* Construct an execution result instance with a correspondence between the tasks and their execution result
|
||||
* @param resultingState the resulting cluster state
|
||||
* @param executionResults the correspondence between tasks and their outcome
|
||||
*/
|
||||
BatchResult(ClusterState resultingState, Map<T, TaskResult> executionResults) {
|
||||
this.resultingState = resultingState;
|
||||
this.executionResults = executionResults;
|
||||
}
|
||||
|
||||
public static <T> Builder<T> builder() {
|
||||
return new Builder<>();
|
||||
}
|
||||
|
||||
public static class Builder<T> {
|
||||
private final Map<T, TaskResult> executionResults = new IdentityHashMap<>();
|
||||
|
||||
public Builder<T> success(T task) {
|
||||
return result(task, TaskResult.success());
|
||||
}
|
||||
|
||||
public Builder<T> successes(Iterable<T> tasks) {
|
||||
for (T task : tasks) {
|
||||
success(task);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder<T> failure(T task, Throwable t) {
|
||||
return result(task, TaskResult.failure(t));
|
||||
}
|
||||
|
||||
public Builder<T> failures(Iterable<T> tasks, Throwable t) {
|
||||
for (T task : tasks) {
|
||||
failure(task, t);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
private Builder<T> result(T task, TaskResult executionResult) {
|
||||
executionResults.put(task, executionResult);
|
||||
return this;
|
||||
}
|
||||
|
||||
public BatchResult<T> build(ClusterState resultingState) {
|
||||
return new BatchResult<>(resultingState, executionResults);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
final class TaskResult {
|
||||
private final Throwable failure;
|
||||
|
||||
private static final TaskResult SUCCESS = new TaskResult(null);
|
||||
|
||||
public static TaskResult success() {
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
public static TaskResult failure(Throwable failure) {
|
||||
return new TaskResult(failure);
|
||||
}
|
||||
|
||||
private TaskResult(Throwable failure) {
|
||||
this.failure = failure;
|
||||
}
|
||||
|
||||
public boolean isSuccess() {
|
||||
return failure == null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle the execution result with the provided consumers
|
||||
* @param onSuccess handler to invoke on success
|
||||
* @param onFailure handler to invoke on failure; the throwable passed through will not be null
|
||||
*/
|
||||
public void handle(Runnable onSuccess, Consumer<Throwable> onFailure) {
|
||||
if (failure == null) {
|
||||
onSuccess.run();
|
||||
} else {
|
||||
onFailure.accept(failure);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
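
A rough, invented sketch of how an implementation is expected to use the builder above: record success or failure per task and return the unchanged state when there is nothing to publish.

---------------------------------------------------------------------------
import java.util.List;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;

// Illustrative executor, not part of this change.
class NoopBatchExecutor implements ClusterStateTaskExecutor<String> {
    @Override
    public BatchResult<String> execute(ClusterState currentState, List<String> tasks) {
        BatchResult.Builder<String> builder = BatchResult.builder();
        for (String task : tasks) {
            if (task.isEmpty()) {
                // each task in the batch gets its own outcome
                builder.failure(task, new IllegalArgumentException("empty task"));
            } else {
                builder.success(task);
            }
        }
        // returning the same ClusterState instance signals that there is nothing to publish
        return builder.build(currentState);
    }
}
---------------------------------------------------------------------------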
|
|
@@ -16,22 +16,28 @@
 * specific language governing permissions and limitations
 * under the License.
 */
-package org.elasticsearch.index.fieldvisitor;
+package org.elasticsearch.cluster;

-import org.apache.lucene.index.FieldInfo;
+import java.util.List;

-import java.io.IOException;
-
-/**
- */
-public class AllFieldsVisitor extends FieldsVisitor {
-
-    public AllFieldsVisitor() {
-        super(true);
-    }
-
-    @Override
-    public Status needsField(FieldInfo fieldInfo) throws IOException {
-        return Status.YES;
+public interface ClusterStateTaskListener {
+
+    /**
+     * A callback called when execute fails.
+     */
+    void onFailure(String source, Throwable t);
+
+    /**
+     * Called when the task was rejected because the local node is no longer master.
+     */
+    default void onNoLongerMaster(String source) {
+        onFailure(source, new NotMasterException("no longer master. source: [" + source + "]"));
+    }
+
+    /**
+     * Called when the result of the {@link ClusterStateTaskExecutor#execute(ClusterState, List)} has been processed
+     * properly by all listeners.
+     */
+    default void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
    }
}
|
@ -20,13 +20,31 @@
|
|||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* A task that can update the cluster state.
|
||||
*/
|
||||
abstract public class ClusterStateUpdateTask {
|
||||
abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<ClusterStateUpdateTask>, ClusterStateTaskListener {
|
||||
|
||||
final private Priority priority;
|
||||
|
||||
public ClusterStateUpdateTask() {
|
||||
this(Priority.NORMAL);
|
||||
}
|
||||
|
||||
public ClusterStateUpdateTask(Priority priority) {
|
||||
this.priority = priority;
|
||||
}
|
||||
|
||||
@Override
|
||||
final public BatchResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
|
||||
ClusterState result = execute(currentState);
|
||||
return BatchResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the cluster state based on the current state. Return the *same instance* if no state
|
||||
|
@ -39,28 +57,6 @@ abstract public class ClusterStateUpdateTask {
|
|||
*/
|
||||
abstract public void onFailure(String source, Throwable t);
|
||||
|
||||
|
||||
/**
|
||||
* indicates whether this task should only run if current node is master
|
||||
*/
|
||||
public boolean runOnlyOnMaster() {
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* called when the task was rejected because the local node is no longer master
|
||||
*/
|
||||
public void onNoLongerMaster(String source) {
|
||||
onFailure(source, new NotMasterException("no longer master. source: [" + source + "]"));
|
||||
}
|
||||
|
||||
/**
|
||||
* Called when the result of the {@link #execute(ClusterState)} have been processed
|
||||
* properly by all listeners.
|
||||
*/
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
}
|
||||
|
||||
/**
|
||||
* If the cluster state update task wasn't processed by the provided timeout, call
|
||||
* {@link #onFailure(String, Throwable)}. May return null to indicate no timeout is needed (default).
|
||||
|
@ -70,5 +66,8 @@ abstract public class ClusterStateUpdateTask {
|
|||
return null;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Priority priority() {
|
||||
return priority;
|
||||
}
|
||||
}
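
Later hunks in this commit migrate call sites to the new priority-carrying constructor; as a minimal invented sketch of that pattern (the source string and task body are illustrative, and an injected ClusterService is assumed):

---------------------------------------------------------------------------
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.common.Priority;

// Illustrative class, not part of this change.
class NonBatchedSubmitExample {
    static void submit(ClusterService clusterService) {
        // The priority now travels with the task itself instead of being passed
        // as a separate argument to submitStateUpdateTask(...).
        clusterService.submitStateUpdateTask("demo-task (example)", new ClusterStateUpdateTask(Priority.HIGH) {
            @Override
            public ClusterState execute(ClusterState currentState) {
                return currentState; // no change to the cluster state
            }

            @Override
            public void onFailure(String source, Throwable t) {
                // handle the failure, e.g. log it
            }
        });
    }
}
---------------------------------------------------------------------------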
|
||||
|
|
|
@ -144,7 +144,8 @@ public class ShardStateAction extends AbstractComponent {
|
|||
private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
|
||||
logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
|
||||
failedShardQueue.add(shardRoutingEntry);
|
||||
clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", Priority.HIGH, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
|
||||
new ClusterStateUpdateTask(Priority.HIGH) {
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
|
@ -198,8 +199,13 @@ public class ShardStateAction extends AbstractComponent {
|
|||
// process started events as fast as possible, to make shards available
|
||||
startedShardsQueue.add(shardRoutingEntry);
|
||||
|
||||
clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", Priority.URGENT,
|
||||
clusterService.submitStateUpdateTask("shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
|
||||
new ClusterStateUpdateTask() {
|
||||
@Override
|
||||
public Priority priority() {
|
||||
return Priority.URGENT;
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
|
||||
|
|
|
@ -170,12 +170,12 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
|||
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
|
||||
request.settings(updatedSettingsBuilder.build());
|
||||
|
||||
clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
}
|
||||
clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]",
|
||||
new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
|
|
|
@ -39,7 +39,6 @@ import org.elasticsearch.threadpool.ThreadPool;
|
|||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Locale;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
@ -71,7 +70,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
|
|||
Collection<String> indices = Arrays.asList(request.indices);
|
||||
final DeleteIndexListener listener = new DeleteIndexListener(userListener);
|
||||
|
||||
clusterService.submitStateUpdateTask("delete-index " + indices, Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
|
|
|
@ -62,7 +62,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
|
|||
}
|
||||
|
||||
public void indicesAliases(final IndicesAliasesClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
clusterService.submitStateUpdateTask("index-aliases", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("index-aliases", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
|
|
|
@ -76,7 +76,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
|
|||
}
|
||||
|
||||
final String indicesAsString = Arrays.toString(request.indices());
|
||||
clusterService.submitStateUpdateTask("close-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("close-indices " + indicesAsString, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
|
@ -140,7 +140,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
|
|||
}
|
||||
|
||||
final String indicesAsString = Arrays.toString(request.indices());
|
||||
clusterService.submitStateUpdateTask("open-indices " + indicesAsString, Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("open-indices " + indicesAsString, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
|
|
|
@ -56,7 +56,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
|
|||
}
|
||||
|
||||
public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {
|
||||
clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("remove-index-template [" + request.name + "]", new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
|
@ -143,7 +143,8 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
|
|||
}
|
||||
final IndexTemplateMetaData template = templateBuilder.build();
|
||||
|
||||
clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]", Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("create-index-template [" + request.name + "], cause [" + request.cause + "]",
|
||||
new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
|
||||
@Override
|
||||
public TimeValue timeout() {
|
||||
|
|
|
@ -22,17 +22,16 @@ package org.elasticsearch.cluster.metadata;
|
|||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.NodeServicesProvider;
|
||||
|
@ -44,6 +43,7 @@ import org.elasticsearch.indices.IndicesService;
|
|||
import org.elasticsearch.indices.InvalidTypeNameException;
|
||||
import org.elasticsearch.percolator.PercolatorService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
/**
|
||||
* Service responsible for submitting mapping changes
|
||||
|
@ -53,13 +53,11 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
private final ClusterService clusterService;
|
||||
private final IndicesService indicesService;
|
||||
|
||||
// the mutex protect all the refreshOrUpdate variables!
|
||||
private final Object refreshOrUpdateMutex = new Object();
|
||||
private final List<MappingTask> refreshOrUpdateQueue = new ArrayList<>();
|
||||
private long refreshOrUpdateInsertOrder;
|
||||
private long refreshOrUpdateProcessedInsertOrder;
|
||||
final ClusterStateTaskExecutor<RefreshTask> refreshExecutor = new RefreshTaskExecutor();
|
||||
final ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> putMappingExecutor = new PutMappingExecutor();
|
||||
private final NodeServicesProvider nodeServicesProvider;
|
||||
|
||||
|
||||
@Inject
|
||||
public MetaDataMappingService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) {
|
||||
super(settings);
|
||||
|
@ -68,37 +66,23 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
this.nodeServicesProvider = nodeServicesProvider;
|
||||
}
|
||||
|
||||
static class MappingTask {
|
||||
static class RefreshTask {
|
||||
final String index;
|
||||
final String indexUUID;
|
||||
|
||||
MappingTask(String index, final String indexUUID) {
|
||||
this.index = index;
|
||||
this.indexUUID = indexUUID;
|
||||
}
|
||||
}
|
||||
|
||||
static class RefreshTask extends MappingTask {
|
||||
final String[] types;
|
||||
|
||||
RefreshTask(String index, final String indexUUID, String[] types) {
|
||||
super(index, indexUUID);
|
||||
this.index = index;
|
||||
this.indexUUID = indexUUID;
|
||||
this.types = types;
|
||||
}
|
||||
}
|
||||
|
||||
static class UpdateTask extends MappingTask {
|
||||
final String type;
|
||||
final CompressedXContent mappingSource;
|
||||
final String nodeId; // null for unknown
|
||||
final ActionListener<ClusterStateUpdateResponse> listener;
|
||||
|
||||
UpdateTask(String index, String indexUUID, String type, CompressedXContent mappingSource, String nodeId, ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
super(index, indexUUID);
|
||||
this.type = type;
|
||||
this.mappingSource = mappingSource;
|
||||
this.nodeId = nodeId;
|
||||
this.listener = listener;
|
||||
class RefreshTaskExecutor implements ClusterStateTaskExecutor<RefreshTask> {
|
||||
@Override
|
||||
public BatchResult<RefreshTask> execute(ClusterState currentState, List<RefreshTask> tasks) throws Exception {
|
||||
ClusterState newClusterState = executeRefresh(currentState, tasks);
|
||||
return BatchResult.<RefreshTask>builder().successes(tasks).build(newClusterState);
|
||||
}
|
||||
}
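`RefreshTaskExecutor` above shows the batching contract introduced by this change: all queued tasks of one type are handed to a single `execute` call and the outcome is reported through a `BatchResult`. A hedged sketch of the same pattern for an arbitrary task type (`MyTask` and `applyAll` are illustrative placeholders, not part of this change):

---------------------------------------------------------------------------
class MyTaskExecutor implements ClusterStateTaskExecutor<MyTask> {
    @Override
    public BatchResult<MyTask> execute(ClusterState currentState, List<MyTask> tasks) throws Exception {
        // fold every pending task into one new cluster state and publish it once
        ClusterState newState = applyAll(currentState, tasks); // hypothetical helper
        return BatchResult.<MyTask>builder().successes(tasks).build(newState);
    }
}
---------------------------------------------------------------------------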
|
||||
|
||||
|
@ -107,50 +91,25 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
* as possible so we won't create the same index all the time, for example for updates on the same mapping,
|
||||
* and generate a single cluster change event out of all of those.
|
||||
*/
|
||||
Tuple<ClusterState, List<MappingTask>> executeRefreshOrUpdate(final ClusterState currentState, final long insertionOrder) throws Exception {
|
||||
final List<MappingTask> allTasks = new ArrayList<>();
|
||||
|
||||
synchronized (refreshOrUpdateMutex) {
|
||||
if (refreshOrUpdateQueue.isEmpty()) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
}
|
||||
|
||||
// we already processed this task in a bulk manner in a previous cluster event, simply ignore
|
||||
// it so we will let other tasks get in and processed ones, we will handle the queued ones
|
||||
// later on in a subsequent cluster state event
|
||||
if (insertionOrder < refreshOrUpdateProcessedInsertOrder) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
}
|
||||
|
||||
allTasks.addAll(refreshOrUpdateQueue);
|
||||
refreshOrUpdateQueue.clear();
|
||||
|
||||
refreshOrUpdateProcessedInsertOrder = refreshOrUpdateInsertOrder;
|
||||
}
|
||||
|
||||
ClusterState executeRefresh(final ClusterState currentState, final List<RefreshTask> allTasks) throws Exception {
|
||||
if (allTasks.isEmpty()) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
return currentState;
|
||||
}
|
||||
|
||||
// break down to tasks per index, so we can optimize the on demand index service creation
|
||||
// to only happen for the duration of a single index processing of its respective events
|
||||
Map<String, List<MappingTask>> tasksPerIndex = new HashMap<>();
|
||||
for (MappingTask task : allTasks) {
|
||||
Map<String, List<RefreshTask>> tasksPerIndex = new HashMap<>();
|
||||
for (RefreshTask task : allTasks) {
|
||||
if (task.index == null) {
|
||||
logger.debug("ignoring a mapping task of type [{}] with a null index.", task);
|
||||
}
|
||||
List<MappingTask> indexTasks = tasksPerIndex.get(task.index);
|
||||
if (indexTasks == null) {
|
||||
indexTasks = new ArrayList<>();
|
||||
tasksPerIndex.put(task.index, indexTasks);
|
||||
}
|
||||
indexTasks.add(task);
|
||||
tasksPerIndex.computeIfAbsent(task.index, k -> new ArrayList<>()).add(task);
|
||||
}
|
||||
|
||||
boolean dirty = false;
|
||||
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
|
||||
|
||||
for (Map.Entry<String, List<MappingTask>> entry : tasksPerIndex.entrySet()) {
|
||||
for (Map.Entry<String, List<RefreshTask>> entry : tasksPerIndex.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
IndexMetaData indexMetaData = mdBuilder.get(index);
|
||||
if (indexMetaData == null) {
|
||||
|
@ -160,9 +119,9 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
}
|
||||
// the tasks lists to iterate over, filled with the list of mapping tasks, trying to keep
|
||||
// the latest (based on order) update mapping one per node
|
||||
List<MappingTask> allIndexTasks = entry.getValue();
|
||||
List<MappingTask> tasks = new ArrayList<>();
|
||||
for (MappingTask task : allIndexTasks) {
|
||||
List<RefreshTask> allIndexTasks = entry.getValue();
|
||||
List<RefreshTask> tasks = new ArrayList<>();
|
||||
for (RefreshTask task : allIndexTasks) {
|
||||
if (!indexMetaData.isSameUUID(task.indexUUID)) {
|
||||
logger.debug("[{}] ignoring task [{}] - index meta data doesn't match task uuid", index, task);
|
||||
continue;
|
||||
|
@ -178,12 +137,8 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
removeIndex = true;
|
||||
Set<String> typesToIntroduce = new HashSet<>();
|
||||
for (MappingTask task : tasks) {
|
||||
if (task instanceof UpdateTask) {
|
||||
typesToIntroduce.add(((UpdateTask) task).type);
|
||||
} else if (task instanceof RefreshTask) {
|
||||
Collections.addAll(typesToIntroduce, ((RefreshTask) task).types);
|
||||
}
|
||||
for (RefreshTask task : tasks) {
|
||||
Collections.addAll(typesToIntroduce, task.types);
|
||||
}
|
||||
for (String type : typesToIntroduce) {
|
||||
// only add the current relevant mapping (if exists)
|
||||
|
@ -209,80 +164,42 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
}
|
||||
|
||||
if (!dirty) {
|
||||
return Tuple.tuple(currentState, allTasks);
|
||||
return currentState;
|
||||
}
|
||||
return Tuple.tuple(ClusterState.builder(currentState).metaData(mdBuilder).build(), allTasks);
|
||||
return ClusterState.builder(currentState).metaData(mdBuilder).build();
|
||||
}
|
||||
|
||||
private boolean processIndexMappingTasks(List<MappingTask> tasks, IndexService indexService, IndexMetaData.Builder builder) {
|
||||
private boolean processIndexMappingTasks(List<RefreshTask> tasks, IndexService indexService, IndexMetaData.Builder builder) {
|
||||
boolean dirty = false;
|
||||
String index = indexService.index().name();
|
||||
// keep track of what we already refreshed, no need to refresh it again...
|
||||
Set<String> processedRefreshes = new HashSet<>();
|
||||
for (MappingTask task : tasks) {
|
||||
if (task instanceof RefreshTask) {
|
||||
RefreshTask refreshTask = (RefreshTask) task;
|
||||
try {
|
||||
List<String> updatedTypes = new ArrayList<>();
|
||||
for (String type : refreshTask.types) {
|
||||
if (processedRefreshes.contains(type)) {
|
||||
continue;
|
||||
}
|
||||
DocumentMapper mapper = indexService.mapperService().documentMapper(type);
|
||||
if (mapper == null) {
|
||||
continue;
|
||||
}
|
||||
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
|
||||
updatedTypes.add(type);
|
||||
builder.putMapping(new MappingMetaData(mapper));
|
||||
}
|
||||
processedRefreshes.add(type);
|
||||
}
|
||||
|
||||
if (updatedTypes.isEmpty()) {
|
||||
for (RefreshTask refreshTask : tasks) {
|
||||
try {
|
||||
List<String> updatedTypes = new ArrayList<>();
|
||||
for (String type : refreshTask.types) {
|
||||
if (processedRefreshes.contains(type)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
|
||||
dirty = true;
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types);
|
||||
}
|
||||
} else if (task instanceof UpdateTask) {
|
||||
UpdateTask updateTask = (UpdateTask) task;
|
||||
try {
|
||||
String type = updateTask.type;
|
||||
CompressedXContent mappingSource = updateTask.mappingSource;
|
||||
|
||||
MappingMetaData mappingMetaData = builder.mapping(type);
|
||||
if (mappingMetaData != null && mappingMetaData.source().equals(mappingSource)) {
|
||||
logger.debug("[{}] update_mapping [{}] ignoring mapping update task as its source is equal to ours", index, updateTask.type);
|
||||
DocumentMapper mapper = indexService.mapperService().documentMapper(type);
|
||||
if (mapper == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
DocumentMapper updatedMapper = indexService.mapperService().merge(type, mappingSource, false, true);
|
||||
if (!mapper.mappingSource().equals(builder.mapping(type).source())) {
|
||||
updatedTypes.add(type);
|
||||
builder.putMapping(new MappingMetaData(mapper));
|
||||
}
|
||||
processedRefreshes.add(type);
|
||||
|
||||
// if we end up with the same mapping as the original one, ignore
|
||||
if (mappingMetaData != null && mappingMetaData.source().equals(updatedMapper.mappingSource())) {
|
||||
logger.debug("[{}] update_mapping [{}] ignoring mapping update task as it results in the same source as what we have", index, updateTask.type);
|
||||
continue;
|
||||
}
|
||||
|
||||
// build the updated mapping source
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] update_mapping [{}] (dynamic) with source [{}]", index, type, updatedMapper.mappingSource());
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] update_mapping [{}] (dynamic)", index, type);
|
||||
}
|
||||
|
||||
builder.putMapping(new MappingMetaData(updatedMapper));
|
||||
dirty = true;
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to update-mapping in cluster state, type [{}]", index, updateTask.type);
|
||||
}
|
||||
} else {
|
||||
logger.warn("illegal state, got wrong mapping task type [{}]", task);
|
||||
|
||||
if (updatedTypes.isEmpty()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
logger.warn("[{}] re-syncing mappings with cluster state for types [{}]", index, updatedTypes);
|
||||
dirty = true;
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to refresh-mapping in cluster state, types [{}]", index, refreshTask.types);
|
||||
}
|
||||
}
|
||||
return dirty;
|
||||
|
@ -292,197 +209,209 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
* Refreshes mappings if they are not the same between original and parsed version
|
||||
*/
|
||||
public void refreshMapping(final String index, final String indexUUID, final String... types) {
|
||||
final long insertOrder;
|
||||
synchronized (refreshOrUpdateMutex) {
|
||||
insertOrder = ++refreshOrUpdateInsertOrder;
|
||||
refreshOrUpdateQueue.add(new RefreshTask(index, indexUUID, types));
|
||||
}
|
||||
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]", Priority.HIGH, new ClusterStateUpdateTask() {
|
||||
private volatile List<MappingTask> allTasks;
|
||||
final RefreshTask refreshTask = new RefreshTask(index, indexUUID, types);
|
||||
clusterService.submitStateUpdateTask("refresh-mapping [" + index + "][" + Arrays.toString(types) + "]",
|
||||
refreshTask,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH),
|
||||
refreshExecutor,
|
||||
(source, t) -> logger.warn("failure during [{}]", t, source)
|
||||
);
|
||||
}
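`refreshMapping` now goes through the five-argument `submitStateUpdateTask` entry point, pairing a task instance with a `ClusterStateTaskConfig`, an executor and a listener. The same entry point also accepts a timeout, as the put-mapping call further down does; a sketch with placeholder names (`myTask`, `myExecutor`, and the thirty-second timeout are assumptions, not code from this change):

---------------------------------------------------------------------------
clusterService.submitStateUpdateTask("my-source",
        myTask,
        ClusterStateTaskConfig.build(Priority.URGENT, TimeValue.timeValueSeconds(30)),
        myExecutor,
        (source, t) -> logger.warn("failure during [{}]", t, source));
---------------------------------------------------------------------------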
|
||||
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
logger.warn("failure during [{}]", t, source);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
Tuple<ClusterState, List<MappingTask>> tuple = executeRefreshOrUpdate(currentState, insertOrder);
|
||||
this.allTasks = tuple.v2();
|
||||
return tuple.v1();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
if (allTasks == null) {
|
||||
return;
|
||||
class PutMappingExecutor implements ClusterStateTaskExecutor<PutMappingClusterStateUpdateRequest> {
|
||||
@Override
|
||||
public BatchResult<PutMappingClusterStateUpdateRequest> execute(ClusterState currentState, List<PutMappingClusterStateUpdateRequest> tasks) throws Exception {
|
||||
Set<String> indicesToClose = new HashSet<>();
|
||||
BatchResult.Builder<PutMappingClusterStateUpdateRequest> builder = BatchResult.builder();
|
||||
try {
|
||||
// precreate incoming indices;
|
||||
for (PutMappingClusterStateUpdateRequest request : tasks) {
|
||||
// failures here mean something is broken with our cluster state - fail all tasks by letting exceptions bubble up
|
||||
for (String index : request.indices()) {
|
||||
if (currentState.metaData().hasIndex(index)) {
|
||||
// if we don't have the index, we will throw exceptions later;
|
||||
if (indicesService.hasIndex(index) == false || indicesToClose.contains(index)) {
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
IndexService indexService;
|
||||
if (indicesService.hasIndex(index) == false) {
|
||||
indicesToClose.add(index);
|
||||
indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
// make sure to add custom default mapping if exists
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
|
||||
}
|
||||
} else {
|
||||
indexService = indicesService.indexService(index);
|
||||
}
|
||||
// only add the current relevant mapping (if exists and not yet added)
|
||||
if (indexMetaData.getMappings().containsKey(request.type()) &&
|
||||
!indexService.mapperService().hasMapping(request.type())) {
|
||||
indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for (Object task : allTasks) {
|
||||
if (task instanceof UpdateTask) {
|
||||
UpdateTask uTask = (UpdateTask) task;
|
||||
ClusterStateUpdateResponse response = new ClusterStateUpdateResponse(true);
|
||||
uTask.listener.onResponse(response);
|
||||
for (PutMappingClusterStateUpdateRequest request : tasks) {
|
||||
try {
|
||||
currentState = applyRequest(currentState, request);
|
||||
builder.success(request);
|
||||
} catch (Throwable t) {
|
||||
builder.failure(request, t);
|
||||
}
|
||||
}
|
||||
|
||||
return builder.build(currentState);
|
||||
} finally {
|
||||
for (String index : indicesToClose) {
|
||||
indicesService.removeIndex(index, "created for mapping processing");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
|
||||
Map<String, DocumentMapper> newMappers = new HashMap<>();
|
||||
Map<String, DocumentMapper> existingMappers = new HashMap<>();
|
||||
for (String index : request.indices()) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(index);
|
||||
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
|
||||
DocumentMapper newMapper;
|
||||
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
|
||||
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
|
||||
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
|
||||
} else {
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
|
||||
if (existingMapper != null) {
|
||||
// first, simulate
|
||||
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
|
||||
// if we have conflicts, throw an exception
|
||||
if (mergeResult.hasConflicts()) {
|
||||
throw new MergeMappingException(mergeResult.buildConflicts());
|
||||
}
|
||||
} else {
|
||||
// TODO: can we find a better place for this validation?
|
||||
// The reason this validation is here is that the mapper service doesn't learn about
|
||||
// new types all at once, which can create a false error.
|
||||
|
||||
// For example in MapperService we can't distinguish between a create index api call
|
||||
// and a put mapping api call, so we don't which type did exist before.
|
||||
// Also the order of the mappings may be backwards.
|
||||
if (newMapper.parentFieldMapper().active()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
|
||||
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
|
||||
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
newMappers.put(index, newMapper);
|
||||
if (existingMapper != null) {
|
||||
existingMappers.put(index, existingMapper);
|
||||
}
|
||||
}
|
||||
|
||||
String mappingType = request.type();
|
||||
if (mappingType == null) {
|
||||
mappingType = newMappers.values().iterator().next().type();
|
||||
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
|
||||
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
|
||||
}
|
||||
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
|
||||
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
|
||||
}
|
||||
final Map<String, MappingMetaData> mappings = new HashMap<>();
|
||||
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
// do the actual merge here on the master, and update the mapping source
|
||||
DocumentMapper newMapper = entry.getValue();
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
CompressedXContent existingSource = null;
|
||||
if (existingMappers.containsKey(entry.getKey())) {
|
||||
existingSource = existingMappers.get(entry.getKey()).mappingSource();
|
||||
}
|
||||
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
|
||||
CompressedXContent updatedSource = mergedMapper.mappingSource();
|
||||
|
||||
if (existingSource != null) {
|
||||
if (existingSource.equals(updatedSource)) {
|
||||
// same source, no changes, ignore it
|
||||
} else {
|
||||
// use the merged mapping source
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
|
||||
}
|
||||
|
||||
}
|
||||
} else {
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
if (mappings.isEmpty()) {
|
||||
// no changes, return
|
||||
return currentState;
|
||||
}
|
||||
MetaData.Builder builder = MetaData.builder(currentState.metaData());
|
||||
for (String indexName : request.indices()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
|
||||
if (indexMetaData == null) {
|
||||
throw new IndexNotFoundException(indexName);
|
||||
}
|
||||
MappingMetaData mappingMd = mappings.get(indexName);
|
||||
if (mappingMd != null) {
|
||||
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
|
||||
}
|
||||
}
|
||||
|
||||
return ClusterState.builder(currentState).metaData(builder).build();
|
||||
}
|
||||
}
|
||||
|
||||
public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]",
|
||||
request,
|
||||
ClusterStateTaskConfig.build(Priority.HIGH, request.masterNodeTimeout()),
|
||||
putMappingExecutor,
|
||||
new AckedClusterStateTaskListener() {
|
||||
|
||||
clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
return new ClusterStateUpdateResponse(acknowledged);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(final ClusterState currentState) throws Exception {
|
||||
List<String> indicesToClose = new ArrayList<>();
|
||||
try {
|
||||
for (String index : request.indices()) {
|
||||
if (!currentState.metaData().hasIndex(index)) {
|
||||
throw new IndexNotFoundException(index);
|
||||
}
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
listener.onFailure(t);
|
||||
}
|
||||
|
||||
// pre create indices here and add mappings to them so we can merge the mappings here if needed
|
||||
for (String index : request.indices()) {
|
||||
if (indicesService.hasIndex(index)) {
|
||||
continue;
|
||||
}
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData, Collections.EMPTY_LIST);
|
||||
indicesToClose.add(indexMetaData.getIndex());
|
||||
// make sure to add custom default mapping if exists
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
|
||||
}
|
||||
// only add the current relevant mapping (if exists)
|
||||
if (indexMetaData.getMappings().containsKey(request.type())) {
|
||||
indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
|
||||
}
|
||||
@Override
|
||||
public boolean mustAck(DiscoveryNode discoveryNode) {
|
||||
return true;
|
||||
}
|
||||
|
||||
Map<String, DocumentMapper> newMappers = new HashMap<>();
|
||||
Map<String, DocumentMapper> existingMappers = new HashMap<>();
|
||||
for (String index : request.indices()) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(index);
|
||||
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
|
||||
DocumentMapper newMapper;
|
||||
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
|
||||
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
|
||||
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
|
||||
} else {
|
||||
newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
|
||||
if (existingMapper != null) {
|
||||
// first, simulate
|
||||
MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
|
||||
// if we have conflicts, throw an exception
|
||||
if (mergeResult.hasConflicts()) {
|
||||
throw new MergeMappingException(mergeResult.buildConflicts());
|
||||
}
|
||||
} else {
|
||||
// TODO: can we find a better place for this validation?
|
||||
// The reason this validation is here is that the mapper service doesn't learn about
|
||||
// new types all at once, which can create a false error.
|
||||
|
||||
// For example in MapperService we can't distinguish between a create index api call
|
||||
// and a put mapping api call, so we don't which type did exist before.
|
||||
// Also the order of the mappings may be backwards.
|
||||
if (newMapper.parentFieldMapper().active()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
|
||||
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
|
||||
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
newMappers.put(index, newMapper);
|
||||
if (existingMapper != null) {
|
||||
existingMappers.put(index, existingMapper);
|
||||
}
|
||||
@Override
|
||||
public void onAllNodesAcked(@Nullable Throwable t) {
|
||||
listener.onResponse(new ClusterStateUpdateResponse(true));
|
||||
}
|
||||
|
||||
String mappingType = request.type();
|
||||
if (mappingType == null) {
|
||||
mappingType = newMappers.values().iterator().next().type();
|
||||
} else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
|
||||
throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
|
||||
}
|
||||
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
|
||||
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
|
||||
@Override
|
||||
public void onAckTimeout() {
|
||||
listener.onResponse(new ClusterStateUpdateResponse(false));
|
||||
}
|
||||
|
||||
final Map<String, MappingMetaData> mappings = new HashMap<>();
|
||||
for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
|
||||
String index = entry.getKey();
|
||||
// do the actual merge here on the master, and update the mapping source
|
||||
DocumentMapper newMapper = entry.getValue();
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
continue;
|
||||
}
|
||||
|
||||
CompressedXContent existingSource = null;
|
||||
if (existingMappers.containsKey(entry.getKey())) {
|
||||
existingSource = existingMappers.get(entry.getKey()).mappingSource();
|
||||
}
|
||||
DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
|
||||
CompressedXContent updatedSource = mergedMapper.mappingSource();
|
||||
|
||||
if (existingSource != null) {
|
||||
if (existingSource.equals(updatedSource)) {
|
||||
// same source, no changes, ignore it
|
||||
} else {
|
||||
// use the merged mapping source
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
mappings.put(index, new MappingMetaData(mergedMapper));
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
|
||||
} else if (logger.isInfoEnabled()) {
|
||||
logger.info("[{}] create_mapping [{}]", index, newMapper.type());
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public TimeValue ackTimeout() {
|
||||
return request.ackTimeout();
|
||||
}
|
||||
|
||||
if (mappings.isEmpty()) {
|
||||
// no changes, return
|
||||
return currentState;
|
||||
}
|
||||
|
||||
MetaData.Builder builder = MetaData.builder(currentState.metaData());
|
||||
for (String indexName : request.indices()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(indexName);
|
||||
if (indexMetaData == null) {
|
||||
throw new IndexNotFoundException(indexName);
|
||||
}
|
||||
MappingMetaData mappingMd = mappings.get(indexName);
|
||||
if (mappingMd != null) {
|
||||
builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
|
||||
}
|
||||
}
|
||||
|
||||
return ClusterState.builder(currentState).metaData(builder).build();
|
||||
} finally {
|
||||
for (String index : indicesToClose) {
|
||||
indicesService.removeIndex(index, "created for mapping processing");
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
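The acked listener that `putMapping` now passes is spread across several hunks above; gathered in one place, the methods it implements (taken from this diff, with the same `ActionListener` wiring) look roughly like this:

---------------------------------------------------------------------------
new AckedClusterStateTaskListener() {
    @Override
    public void onFailure(String source, Throwable t) {
        listener.onFailure(t);
    }

    @Override
    public boolean mustAck(DiscoveryNode discoveryNode) {
        return true; // every node must acknowledge the mapping change
    }

    @Override
    public void onAllNodesAcked(@Nullable Throwable t) {
        listener.onResponse(new ClusterStateUpdateResponse(true));
    }

    @Override
    public void onAckTimeout() {
        listener.onResponse(new ClusterStateUpdateResponse(false));
    }

    @Override
    public TimeValue ackTimeout() {
        return request.ackTimeout();
    }
};
---------------------------------------------------------------------------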
|
||||
|
|
|
@ -24,11 +24,7 @@ import org.elasticsearch.action.ActionListener;
|
|||
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
|
@ -44,13 +40,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.settings.IndexDynamicSettings;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.*;
|
||||
|
||||
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
||||
|
||||
|
@ -219,7 +209,8 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
}
|
||||
final Settings openSettings = updatedSettingsBuilder.build();
|
||||
|
||||
clusterService.submitStateUpdateTask("update-settings", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("update-settings",
|
||||
new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
|
@ -334,7 +325,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
|
||||
|
||||
|
||||
clusterService.submitStateUpdateTask("update-index-compatibility-versions", Priority.URGENT, new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(request, listener) {
|
||||
clusterService.submitStateUpdateTask("update-index-compatibility-versions", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, listener) {
|
||||
|
||||
@Override
|
||||
protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
|
||||
|
|
|
@ -147,7 +147,7 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> i
|
|||
return;
|
||||
}
|
||||
logger.trace("rerouting {}", reason);
|
||||
clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", Priority.HIGH, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", new ClusterStateUpdateTask(Priority.HIGH) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
rerouting.set(false);
|
||||
|
|
|
@ -20,16 +20,8 @@
|
|||
package org.elasticsearch.cluster.service;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.ClusterState.Builder;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.LocalNodeMasterListener;
|
||||
import org.elasticsearch.cluster.TimeoutClusterStateListener;
|
||||
import org.elasticsearch.cluster.block.ClusterBlock;
|
||||
import org.elasticsearch.cluster.block.ClusterBlocks;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
|
@ -41,6 +33,7 @@ import org.elasticsearch.cluster.routing.OperationRouting;
|
|||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
|
@ -49,13 +42,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.text.StringText;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.common.util.concurrent.FutureUtils;
|
||||
import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
|
||||
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.*;
|
||||
import org.elasticsearch.common.util.iterable.Iterables;
|
||||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.discovery.DiscoveryService;
|
||||
|
@ -63,18 +50,10 @@ import org.elasticsearch.node.settings.NodeSettingsService;
|
|||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Queue;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.*;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
|
||||
|
||||
|
@ -111,6 +90,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
private final Collection<ClusterStateListener> priorityClusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateListener> lastClusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Map<ClusterStateTaskExecutor, List<UpdateTask>> updateTasksPerExecutor = new HashMap<>();
|
||||
// TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API
|
||||
private final Collection<ClusterStateListener> postAppliedListeners = new CopyOnWriteArrayList<>();
|
||||
private final Iterable<ClusterStateListener> preAppliedListeners = Iterables.concat(priorityClusterStateListeners, clusterStateListeners, lastClusterStateListeners);
|
||||
|
@ -289,30 +269,33 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
|
||||
@Override
|
||||
public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) {
|
||||
submitStateUpdateTask(source, Priority.NORMAL, updateTask);
|
||||
submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask);
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void submitStateUpdateTask(final String source, Priority priority, final ClusterStateUpdateTask updateTask) {
|
||||
public <T> void submitStateUpdateTask(final String source, final T task,
|
||||
final ClusterStateTaskConfig config,
|
||||
final ClusterStateTaskExecutor<T> executor,
|
||||
final ClusterStateTaskListener listener
|
||||
) {
|
||||
if (!lifecycle.started()) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
final UpdateTask task = new UpdateTask(source, priority, updateTask);
|
||||
if (updateTask.timeout() != null) {
|
||||
updateTasksExecutor.execute(task, threadPool.scheduler(), updateTask.timeout(), new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
threadPool.generic().execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
updateTask.onFailure(task.source(), new ProcessClusterEventTimeoutException(updateTask.timeout(), task.source()));
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
final UpdateTask<T> updateTask = new UpdateTask<>(source, task, config, executor, listener);
|
||||
|
||||
synchronized (updateTasksPerExecutor) {
|
||||
updateTasksPerExecutor.computeIfAbsent(executor, k -> new ArrayList<>()).add(updateTask);
|
||||
}
|
||||
|
||||
if (config.timeout() != null) {
|
||||
updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> {
|
||||
if (updateTask.processed.getAndSet(true) == false) {
|
||||
listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source));
|
||||
}}));
|
||||
} else {
|
||||
updateTasksExecutor.execute(task);
|
||||
updateTasksExecutor.execute(updateTask);
|
||||
}
|
||||
} catch (EsRejectedExecutionException e) {
|
||||
// ignore cases where we are shutting down..., there is really nothing interesting
|
||||
|
@ -379,188 +362,238 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
}
|
||||
}
|
||||
|
||||
class UpdateTask extends SourcePrioritizedRunnable {
|
||||
|
||||
public final ClusterStateUpdateTask updateTask;
|
||||
|
||||
UpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
|
||||
super(priority, source);
|
||||
this.updateTask = updateTask;
|
||||
<T> void runTasksForExecutor(ClusterStateTaskExecutor<T> executor) {
|
||||
final ArrayList<UpdateTask<T>> toExecute = new ArrayList<>();
|
||||
final ArrayList<String> sources = new ArrayList<>();
|
||||
synchronized (updateTasksPerExecutor) {
|
||||
List<UpdateTask> pending = updateTasksPerExecutor.remove(executor);
|
||||
if (pending != null) {
|
||||
for (UpdateTask<T> task : pending) {
|
||||
if (task.processed.getAndSet(true) == false) {
|
||||
logger.trace("will process [{}]", task.source);
|
||||
toExecute.add(task);
|
||||
sources.add(task.source);
|
||||
} else {
|
||||
logger.trace("skipping [{}], already processed", task.source);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (toExecute.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
final String source = Strings.collectionToCommaDelimitedString(sources);
|
||||
if (!lifecycle.started()) {
|
||||
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
|
||||
return;
|
||||
}
|
||||
logger.debug("processing [{}]: execute", source);
|
||||
ClusterState previousClusterState = clusterState;
|
||||
if (!previousClusterState.nodes().localNodeMaster() && executor.runOnlyOnMaster()) {
|
||||
logger.debug("failing [{}]: local node is no longer master", source);
|
||||
toExecute.stream().forEach(task -> task.listener.onNoLongerMaster(task.source));
|
||||
return;
|
||||
}
|
||||
ClusterStateTaskExecutor.BatchResult<T> batchResult;
|
||||
long startTimeNS = System.nanoTime();
|
||||
try {
|
||||
List<T> inputs = toExecute.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList());
|
||||
batchResult = executor.execute(previousClusterState, inputs);
|
||||
} catch (Throwable e) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
|
||||
sb.append(previousClusterState.nodes().prettyPrint());
|
||||
sb.append(previousClusterState.routingTable().prettyPrint());
|
||||
sb.append(previousClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.trace(sb.toString(), e);
|
||||
}
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failures(toExecute.stream().map(updateTask -> updateTask.task)::iterator, e).build(previousClusterState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
if (!lifecycle.started()) {
|
||||
logger.debug("processing [{}]: ignoring, cluster_service not started", source);
|
||||
return;
|
||||
}
|
||||
logger.debug("processing [{}]: execute", source);
|
||||
ClusterState previousClusterState = clusterState;
|
||||
if (!previousClusterState.nodes().localNodeMaster() && updateTask.runOnlyOnMaster()) {
|
||||
logger.debug("failing [{}]: local node is no longer master", source);
|
||||
updateTask.onNoLongerMaster(source);
|
||||
return;
|
||||
}
|
||||
ClusterState newClusterState;
|
||||
long startTimeNS = System.nanoTime();
|
||||
try {
|
||||
newClusterState = updateTask.execute(previousClusterState);
|
||||
} catch (Throwable e) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("failed to execute cluster state update in ").append(executionTime).append(", state:\nversion [").append(previousClusterState.version()).append("], source [").append(source).append("]\n");
|
||||
sb.append(previousClusterState.nodes().prettyPrint());
|
||||
sb.append(previousClusterState.routingTable().prettyPrint());
|
||||
sb.append(previousClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.trace(sb.toString(), e);
|
||||
}
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
updateTask.onFailure(source, e);
|
||||
return;
|
||||
}
|
||||
assert batchResult.executionResults != null;
|
||||
|
||||
if (previousClusterState == newClusterState) {
|
||||
if (updateTask instanceof AckedClusterStateUpdateTask) {
|
||||
ClusterState newClusterState = batchResult.resultingState;
|
||||
final ArrayList<UpdateTask<T>> processedListeners = new ArrayList<>();
|
||||
// fail all tasks that have failed and extract those that are waiting for results
|
||||
for (UpdateTask<T> updateTask : toExecute) {
|
||||
assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString();
|
||||
final ClusterStateTaskExecutor.TaskResult executionResult =
|
||||
batchResult.executionResults.get(updateTask.task);
|
||||
executionResult.handle(() -> processedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex));
|
||||
}
|
||||
|
||||
if (previousClusterState == newClusterState) {
|
||||
for (UpdateTask<T> task : processedListeners) {
|
||||
if (task.listener instanceof AckedClusterStateTaskListener) {
|
||||
// no need to wait for ack if nothing changed, the update can be counted as acknowledged
|
||||
((AckedClusterStateUpdateTask) updateTask).onAllNodesAcked(null);
|
||||
((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null);
|
||||
}
|
||||
updateTask.clusterStateProcessed(source, previousClusterState, newClusterState);
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
return;
|
||||
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
|
||||
}
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} no change in cluster_state", source, executionTime);
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
Discovery.AckListener ackListener = new NoOpAckListener();
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
// only the master controls the version numbers
|
||||
Builder builder = ClusterState.builder(newClusterState).incrementVersion();
|
||||
if (previousClusterState.routingTable() != newClusterState.routingTable()) {
|
||||
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build());
|
||||
}
|
||||
if (previousClusterState.metaData() != newClusterState.metaData()) {
|
||||
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
|
||||
}
|
||||
newClusterState = builder.build();
|
||||
|
||||
if (updateTask instanceof AckedClusterStateUpdateTask) {
|
||||
final AckedClusterStateUpdateTask ackedUpdateTask = (AckedClusterStateUpdateTask) updateTask;
|
||||
if (ackedUpdateTask.ackTimeout() == null || ackedUpdateTask.ackTimeout().millis() == 0) {
|
||||
ackedUpdateTask.onAckTimeout();
|
||||
try {
|
||||
ArrayList<Discovery.AckListener> ackListeners = new ArrayList<>();
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
// only the master controls the version numbers
|
||||
Builder builder = ClusterState.builder(newClusterState).incrementVersion();
|
||||
if (previousClusterState.routingTable() != newClusterState.routingTable()) {
|
||||
builder.routingTable(RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build());
|
||||
}
|
||||
if (previousClusterState.metaData() != newClusterState.metaData()) {
|
||||
builder.metaData(MetaData.builder(newClusterState.metaData()).version(newClusterState.metaData().version() + 1));
|
||||
}
|
||||
newClusterState = builder.build();
|
||||
for (UpdateTask<T> task : processedListeners) {
|
||||
if (task.listener instanceof AckedClusterStateTaskListener) {
|
||||
final AckedClusterStateTaskListener ackedListener = (AckedClusterStateTaskListener) task.listener;
|
||||
if (ackedListener.ackTimeout() == null || ackedListener.ackTimeout().millis() == 0) {
|
||||
ackedListener.onAckTimeout();
|
||||
} else {
|
||||
try {
|
||||
ackListener = new AckCountDownListener(ackedUpdateTask, newClusterState.version(), newClusterState.nodes(), threadPool);
|
||||
ackListeners.add(new AckCountDownListener(ackedListener, newClusterState.version(), newClusterState.nodes(), threadPool));
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Couldn't schedule timeout thread - node might be shutting down", ex);
|
||||
}
|
||||
// timeout straightaway, otherwise we could wait forever as the timeout thread has not started
|
||||
ackedUpdateTask.onAckTimeout();
|
||||
ackedListener.onAckTimeout();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.prettyPrint());
|
||||
logger.trace(sb.toString());
|
||||
} else if (logger.isDebugEnabled()) {
|
||||
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
|
||||
}
|
||||
|
||||
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
|
||||
// new cluster state, notify all listeners
|
||||
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
|
||||
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
|
||||
String summary = nodesDelta.shortSummary();
|
||||
if (summary.length() > 0) {
|
||||
logger.info("{}, reason: {}", summary, source);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO, do this in parallel (and wait)
|
||||
for (DiscoveryNode node : nodesDelta.addedNodes()) {
|
||||
if (!nodeRequiresConnection(node)) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
transportService.connectToNode(node);
|
||||
} catch (Throwable e) {
|
||||
// the fault detection will detect it as failed as well
|
||||
logger.warn("failed to connect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
// if we are the master, publish the new state to all nodes
|
||||
// we publish here before we send a notification to all the listeners, since if it fails
|
||||
// we don't want to notify
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
logger.debug("publishing cluster state version [{}]", newClusterState.version());
|
||||
try {
|
||||
discoveryService.publish(clusterChangedEvent, ackListener);
|
||||
} catch (Discovery.FailedToCommitClusterStateException t) {
|
||||
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version());
|
||||
updateTask.onFailure(source, t);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// update the current cluster state
|
||||
clusterState = newClusterState;
|
||||
logger.debug("set local cluster state to version {}", newClusterState.version());
|
||||
for (ClusterStateListener listener : preAppliedListeners) {
|
||||
try {
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
for (DiscoveryNode node : nodesDelta.removedNodes()) {
|
||||
try {
|
||||
transportService.disconnectFromNode(node);
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to disconnect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);
|
||||
|
||||
for (ClusterStateListener listener : postAppliedListeners) {
|
||||
try {
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
//manual ack only from the master at the end of the publish
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
try {
|
||||
ackListener.onNodeAck(newClusterState.nodes().localNode(), null);
|
||||
} catch (Throwable t) {
|
||||
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
|
||||
}
|
||||
}
|
||||
|
||||
updateTask.clusterStateProcessed(source, previousClusterState, newClusterState);
|
||||
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
} catch (Throwable t) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.nodes().prettyPrint());
|
||||
sb.append(newClusterState.routingTable().prettyPrint());
|
||||
sb.append(newClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.warn(sb.toString(), t);
|
||||
// TODO: do we want to call updateTask.onFailure here?
|
||||
}
|
||||
final Discovery.AckListener ackListener = new DelegetingAckListener(ackListeners);
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.BEING_APPLIED);
|
||||
|
||||
if (logger.isTraceEnabled()) {
|
||||
StringBuilder sb = new StringBuilder("cluster state updated, source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.prettyPrint());
|
||||
logger.trace(sb.toString());
|
||||
} else if (logger.isDebugEnabled()) {
|
||||
logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), source);
|
||||
}
|
||||
|
||||
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(source, newClusterState, previousClusterState);
|
||||
// new cluster state, notify all listeners
|
||||
final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta();
|
||||
if (nodesDelta.hasChanges() && logger.isInfoEnabled()) {
|
||||
String summary = nodesDelta.shortSummary();
|
||||
if (summary.length() > 0) {
|
||||
logger.info("{}, reason: {}", summary, source);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO, do this in parallel (and wait)
|
||||
for (DiscoveryNode node : nodesDelta.addedNodes()) {
|
||||
if (!nodeRequiresConnection(node)) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
transportService.connectToNode(node);
|
||||
} catch (Throwable e) {
|
||||
// the fault detection will detect it as failed as well
|
||||
logger.warn("failed to connect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
// if we are the master, publish the new state to all nodes
|
||||
// we publish here before we send a notification to all the listeners, since if it fails
|
||||
// we don't want to notify
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
logger.debug("publishing cluster state version [{}]", newClusterState.version());
|
||||
try {
|
||||
discoveryService.publish(clusterChangedEvent, ackListener);
|
||||
} catch (Discovery.FailedToCommitClusterStateException t) {
|
||||
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version());
|
||||
proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// update the current cluster state
|
||||
clusterState = newClusterState;
|
||||
logger.debug("set local cluster state to version {}", newClusterState.version());
|
||||
for (ClusterStateListener listener : preAppliedListeners) {
|
||||
try {
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
for (DiscoveryNode node : nodesDelta.removedNodes()) {
|
||||
try {
|
||||
transportService.disconnectFromNode(node);
|
||||
} catch (Throwable e) {
|
||||
logger.warn("failed to disconnect to node [" + node + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
newClusterState.status(ClusterState.ClusterStateStatus.APPLIED);
|
||||
|
||||
for (ClusterStateListener listener : postAppliedListeners) {
|
||||
try {
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
//manual ack only from the master at the end of the publish
|
||||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
try {
|
||||
ackListener.onNodeAck(newClusterState.nodes().localNode(), null);
|
||||
} catch (Throwable t) {
|
||||
logger.debug("error while processing ack for master node [{}]", t, newClusterState.nodes().localNode());
|
||||
}
|
||||
}
|
||||
|
||||
for (UpdateTask<T> task : proccessedListeners) {
|
||||
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
|
||||
}
|
||||
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
|
||||
warnAboutSlowTaskIfNeeded(executionTime, source);
|
||||
} catch (Throwable t) {
|
||||
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
|
||||
StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
|
||||
sb.append(newClusterState.nodes().prettyPrint());
|
||||
sb.append(newClusterState.routingTable().prettyPrint());
|
||||
sb.append(newClusterState.getRoutingNodes().prettyPrint());
|
||||
logger.warn(sb.toString(), t);
|
||||
// TODO: do we want to call updateTask.onFailure here?
|
||||
}
|
||||
|
||||
}

    class UpdateTask<T> extends SourcePrioritizedRunnable {

        public final T task;
        public final ClusterStateTaskConfig config;
        public final ClusterStateTaskExecutor<T> executor;
        public final ClusterStateTaskListener listener;
        public final AtomicBoolean processed = new AtomicBoolean();

        UpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {
            super(config.priority(), source);
            this.task = task;
            this.config = config;
            this.executor = executor;
            this.listener = listener;
        }

        @Override
        public void run() {
            runTasksForExecutor(executor);
        }
    }

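The UpdateTask above only carries a task together with its config, executor and listener; its run() hands control to runTasksForExecutor, which can then drain every pending task that shares the same executor in one batch. A minimal, self-contained sketch of that grouping idea — the queue, task and executor types are simplified stand-ins, not the Elasticsearch classes:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Sketch: pending tasks are grouped by the executor they were submitted with,
// so one pass over the queue can apply every task that shares an executor.
class TaskBatchingSketch {
    interface BatchExecutor { void execute(List<String> tasks); }

    private final Map<BatchExecutor, List<String>> pending = new LinkedHashMap<>();

    void submit(BatchExecutor executor, String task) {
        pending.computeIfAbsent(executor, e -> new ArrayList<>()).add(task);
    }

    // Drains and runs every task queued for the given executor in one batch.
    void runTasksFor(BatchExecutor executor) {
        List<String> batch = pending.remove(executor);
        if (batch != null && !batch.isEmpty()) {
            executor.execute(batch);
        }
    }

    public static void main(String[] args) {
        TaskBatchingSketch service = new TaskBatchingSketch();
        BatchExecutor mappingUpdates = tasks -> System.out.println("applying " + tasks.size() + " mapping updates");
        service.submit(mappingUpdates, "put-mapping [a]");
        service.submit(mappingUpdates, "put-mapping [b]");
        service.runTasksFor(mappingUpdates); // applying 2 mapping updates
    }
}
---------------------------------------------------------------------------
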
@ -729,13 +762,24 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
        }
    }

    private static class NoOpAckListener implements Discovery.AckListener {
    private static class DelegetingAckListener implements Discovery.AckListener {

        final private List<Discovery.AckListener> listeners;

        private DelegetingAckListener(List<Discovery.AckListener> listeners) {
            this.listeners = listeners;
        }

        @Override
        public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
            for (Discovery.AckListener listener : listeners) {
                listener.onNodeAck(node, t);
            }
        }

        @Override
        public void onTimeout() {
            throw new UnsupportedOperationException("no timeout delegation");
        }
    }

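With batched tasks a single publish still yields one ack per node, but every acked task needs its own notification, so the DelegetingAckListener above fans each node ack out to the per-task listeners collected earlier. A small stand-alone sketch of that fan-out; AckListener here is a local stand-in for Discovery.AckListener:

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.List;

class DelegatingAckSketch {
    interface AckListener { void onNodeAck(String nodeId, Throwable failure); }

    // Wraps several per-task listeners behind the single listener the publish path expects.
    static AckListener delegating(List<AckListener> listeners) {
        return (nodeId, failure) -> {
            for (AckListener listener : listeners) {
                listener.onNodeAck(nodeId, failure);
            }
        };
    }

    public static void main(String[] args) {
        AckListener taskA = (node, t) -> System.out.println("task A acked by " + node);
        AckListener taskB = (node, t) -> System.out.println("task B acked by " + node);
        AckListener combined = delegating(Arrays.asList(taskA, taskB));
        combined.onNodeAck("node-1", null); // both task listeners are notified once
    }
}
---------------------------------------------------------------------------
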
@ -743,20 +787,20 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
|
||||
private static final ESLogger logger = Loggers.getLogger(AckCountDownListener.class);
|
||||
|
||||
private final AckedClusterStateUpdateTask ackedUpdateTask;
|
||||
private final AckedClusterStateTaskListener ackedTaskListener;
|
||||
private final CountDown countDown;
|
||||
private final DiscoveryNodes nodes;
|
||||
private final long clusterStateVersion;
|
||||
private final Future<?> ackTimeoutCallback;
|
||||
private Throwable lastFailure;
|
||||
|
||||
AckCountDownListener(AckedClusterStateUpdateTask ackedUpdateTask, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
|
||||
this.ackedUpdateTask = ackedUpdateTask;
|
||||
AckCountDownListener(AckedClusterStateTaskListener ackedTaskListener, long clusterStateVersion, DiscoveryNodes nodes, ThreadPool threadPool) {
|
||||
this.ackedTaskListener = ackedTaskListener;
|
||||
this.clusterStateVersion = clusterStateVersion;
|
||||
this.nodes = nodes;
|
||||
int countDown = 0;
|
||||
for (DiscoveryNode node : nodes) {
|
||||
if (ackedUpdateTask.mustAck(node)) {
|
||||
if (ackedTaskListener.mustAck(node)) {
|
||||
countDown++;
|
||||
}
|
||||
}
|
||||
|
@ -764,7 +808,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
countDown = Math.max(1, countDown);
|
||||
logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion);
|
||||
this.countDown = new CountDown(countDown);
|
||||
this.ackTimeoutCallback = threadPool.schedule(ackedUpdateTask.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
|
||||
this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
onTimeout();
|
||||
|
@ -774,7 +818,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
|
||||
@Override
|
||||
public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
|
||||
if (!ackedUpdateTask.mustAck(node)) {
|
||||
if (!ackedTaskListener.mustAck(node)) {
|
||||
//we always wait for the master ack anyway
|
||||
if (!node.equals(nodes.masterNode())) {
|
||||
return;
|
||||
|
@ -790,7 +834,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
if (countDown.countDown()) {
|
||||
logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion);
|
||||
FutureUtils.cancel(ackTimeoutCallback);
|
||||
ackedUpdateTask.onAllNodesAcked(lastFailure);
|
||||
ackedTaskListener.onAllNodesAcked(lastFailure);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -798,7 +842,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
public void onTimeout() {
|
||||
if (countDown.fastForward()) {
|
||||
logger.trace("timeout waiting for acknowledgement for cluster_state update (version: {})", clusterStateVersion);
|
||||
ackedUpdateTask.onAckTimeout();
|
||||
ackedTaskListener.onAckTimeout();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -810,5 +854,4 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
InternalClusterService.this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
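The AckCountDownListener changes in the hunks above keep the same mechanics, only retargeted from AckedClusterStateUpdateTask to AckedClusterStateTaskListener: count down once per node that must ack, schedule a timeout up front, and let whichever fires first win. A self-contained sketch of that race built on plain JDK scheduling — names and types here are illustrative, not the Elasticsearch CountDown/ThreadPool API:

---------------------------------------------------------------------------
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

class AckCountDownSketch {
    private final AtomicInteger remaining;
    private final ScheduledFuture<?> timeoutTask;
    private final Runnable onAllAcked;

    AckCountDownSketch(int expectedAcks, long timeoutMillis, ScheduledExecutorService scheduler,
                       Runnable onAllAcked, Runnable onTimeout) {
        this.remaining = new AtomicInteger(Math.max(1, expectedAcks)); // always wait for at least one ack
        this.onAllAcked = onAllAcked;
        // Schedule the timeout up front; it is cancelled once every expected node has acked.
        this.timeoutTask = scheduler.schedule(() -> {
            if (remaining.getAndSet(0) > 0) { // "fast forward": late acks are ignored after a timeout
                onTimeout.run();
            }
        }, timeoutMillis, TimeUnit.MILLISECONDS);
    }

    void onNodeAck() {
        if (remaining.get() > 0 && remaining.decrementAndGet() == 0) {
            timeoutTask.cancel(false);
            onAllAcked.run();
        }
    }

    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        AckCountDownSketch acks = new AckCountDownSketch(2, 500, scheduler,
                () -> System.out.println("all nodes acked"),
                () -> System.out.println("ack timeout"));
        acks.onNodeAck();
        acks.onNodeAck(); // prints "all nodes acked" and cancels the timeout
        scheduler.shutdown();
    }
}
---------------------------------------------------------------------------
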
|
|
|
@ -133,7 +133,7 @@ public class NodeJoinController extends AbstractComponent {
|
|||
|
||||
/** utility method to fail the given election context under the cluster state thread */
|
||||
private void failContext(final ElectionContext context, final String reason, final Throwable throwable) {
|
||||
clusterService.submitStateUpdateTask("zen-disco-join(failure [" + reason + "])", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-join(failure [" + reason + "])", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
|
@ -231,7 +231,7 @@ public class NodeJoinController extends AbstractComponent {
|
|||
}
|
||||
|
||||
final String source = "zen-disco-join(elected_as_master, [" + pendingMasterJoins + "] joins received)";
|
||||
clusterService.submitStateUpdateTask(source, Priority.IMMEDIATE, new ProcessJoinsTask() {
|
||||
clusterService.submitStateUpdateTask(source, new ProcessJoinsTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
// Take into account the previous known nodes, if they happen not to be available
|
||||
|
@ -280,7 +280,7 @@ public class NodeJoinController extends AbstractComponent {
|
|||
|
||||
/** process all pending joins */
|
||||
private void processJoins(String reason) {
|
||||
clusterService.submitStateUpdateTask("zen-disco-join(" + reason + ")", Priority.URGENT, new ProcessJoinsTask());
|
||||
clusterService.submitStateUpdateTask("zen-disco-join(" + reason + ")", new ProcessJoinsTask(Priority.URGENT));
|
||||
}
|
||||
|
||||
|
||||
|
@ -356,6 +356,10 @@ public class NodeJoinController extends AbstractComponent {
        private final List<MembershipAction.JoinCallback> joinCallbacksToRespondTo = new ArrayList<>();
        private boolean nodeAdded = false;

        public ProcessJoinsTask(Priority priority) {
            super(priority);
        }

        @Override
        public ClusterState execute(ClusterState currentState) {
            DiscoveryNodes.Builder nodesBuilder;
|
|
|
@ -320,7 +320,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
|||
} catch (FailedToCommitClusterStateException t) {
|
||||
// cluster service logs a WARN message
|
||||
logger.debug("failed to publish cluster state version [{}] (not enough nodes acknowledged, min master nodes [{}])", clusterChangedEvent.state().version(), electMaster.minimumMasterNodes());
|
||||
clusterService.submitStateUpdateTask("zen-disco-failed-to-publish", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-failed-to-publish", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
return rejoin(currentState, "failed to publish to min_master_nodes");
|
||||
|
@ -498,7 +498,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
|||
return;
|
||||
}
|
||||
if (localNodeMaster()) {
|
||||
clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-node_left(" + node + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.id());
|
||||
|
@ -538,7 +538,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
|||
// nothing to do here...
|
||||
return;
|
||||
}
|
||||
clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-node_failed(" + node + "), reason " + reason, new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
if (currentState.nodes().get(node.id()) == null) {
|
||||
|
@ -587,7 +587,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
|||
// We only set the new value. If the master doesn't see enough nodes it will revoke it's mastership.
|
||||
return;
|
||||
}
|
||||
clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-minimum_master_nodes_changed", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) {
|
||||
// check if we have enough master nodes, if not, we need to move into joining the cluster again
|
||||
|
@ -627,7 +627,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
|||
|
||||
logger.info("master_left [{}], reason [{}]", cause, masterNode, reason);
|
||||
|
||||
clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-master_failed (" + masterNode + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
|
@ -694,7 +694,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
|||
}
|
||||
|
||||
void processNextPendingClusterState(String reason) {
|
||||
clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", Priority.URGENT, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("zen-disco-receive(from master [" + reason + "])", new ClusterStateUpdateTask(Priority.URGENT) {
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
return false;
|
||||
|
@ -1059,7 +1059,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
|||
return;
|
||||
}
|
||||
logger.debug("got a ping from another master {}. resolving who should rejoin. current ping count: [{}]", pingRequest.masterNode(), pingsWhileMaster.get());
|
||||
clusterService.submitStateUpdateTask("ping from another master", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("ping from another master", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
|
@ -1114,7 +1114,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
|||
class RejoinClusterRequestHandler implements TransportRequestHandler<RejoinClusterRequest> {
|
||||
@Override
|
||||
public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception {
|
||||
clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
|
|
|
@ -19,22 +19,32 @@
package org.elasticsearch.index.fieldvisitor;

import org.apache.lucene.index.FieldInfo;
import org.elasticsearch.common.regex.Regex;

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Set;

/**
 * A field visitor that allows to load a selection of the stored fields.
 * A field visitor that allows to load a selection of the stored fields by exact name or by pattern.
 * Supported pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy".
 * The Uid field is always loaded.
 * The class is optimized for source loading as it is a common use case.
 */
public class CustomFieldsVisitor extends FieldsVisitor {

    private final Set<String> fields;
    private final List<String> patterns;

    public CustomFieldsVisitor(Set<String> fields, boolean loadSource) {
    public CustomFieldsVisitor(Set<String> fields, List<String> patterns, boolean loadSource) {
        super(loadSource);
        this.fields = fields;
        this.patterns = patterns;
    }

    public CustomFieldsVisitor(Set<String> fields, boolean loadSource) {
        this(fields, Collections.emptyList(), loadSource);
    }

    @Override

@ -42,7 +52,14 @@ public class CustomFieldsVisitor extends FieldsVisitor {
        if (super.needsField(fieldInfo) == Status.YES) {
            return Status.YES;
        }

        return fields.contains(fieldInfo.name) ? Status.YES : Status.NO;
        if (fields.contains(fieldInfo.name)) {
            return Status.YES;
        }
        for (String pattern : patterns) {
            if (Regex.simpleMatch(pattern, fieldInfo.name)) {
                return Status.YES;
            }
        }
        return Status.NO;
    }
}
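CustomFieldsVisitor now accepts wildcard patterns alongside exact field names and checks them with Regex.simpleMatch. The sketch below mirrors that decision with a simplified matcher covering the four documented pattern shapes; it is an illustration, not the Elasticsearch Regex implementation:

---------------------------------------------------------------------------
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class FieldSelectionSketch {
    // Simplified wildcard matcher for "xxx*", "*xxx", "*xxx*" and "xxx*yyy".
    static boolean simpleMatch(String pattern, String value) {
        int first = pattern.indexOf('*');
        if (first == -1) {
            return pattern.equals(value);
        }
        int last = pattern.lastIndexOf('*');
        String prefix = pattern.substring(0, first);
        String suffix = pattern.substring(last + 1);
        String middle = first == last ? null : pattern.substring(first + 1, last);
        if (value.length() < prefix.length() + suffix.length()
                || !value.startsWith(prefix) || !value.endsWith(suffix)) {
            return false;
        }
        return middle == null
                || value.substring(prefix.length(), value.length() - suffix.length()).contains(middle);
    }

    // Same decision as CustomFieldsVisitor.needsField: exact names first, then patterns.
    static boolean needsField(String fieldName, Set<String> fields, List<String> patterns) {
        if (fields.contains(fieldName)) {
            return true;
        }
        for (String pattern : patterns) {
            if (simpleMatch(pattern, fieldName)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Set<String> fields = new HashSet<>(Arrays.asList("title"));
        List<String> patterns = Arrays.asList("user.*", "*_id");
        System.out.println(needsField("user.name", fields, patterns)); // true
        System.out.println(needsField("parent_id", fields, patterns)); // true
        System.out.println(needsField("body", fields, patterns));      // false
    }
}
---------------------------------------------------------------------------
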
|
|
|
@ -351,7 +351,7 @@ public class DocumentMapper implements ToXContent {
|
|||
this.fieldMappers = this.fieldMappers.copyAndAllAll(fieldMappers);
|
||||
|
||||
// finally update for the entire index
|
||||
mapperService.addMappers(objectMappers, fieldMappers);
|
||||
mapperService.addMappers(type, objectMappers, fieldMappers);
|
||||
}
|
||||
|
||||
public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
|
||||
|
|
|
@ -79,6 +79,10 @@ class DocumentParser implements Closeable {
    }

    private ParsedDocument innerParseDocument(SourceToParse source) throws MapperParsingException {
        if (docMapper.type().equals(MapperService.DEFAULT_MAPPING)) {
            throw new IllegalArgumentException("It is forbidden to index into the default mapping [" + MapperService.DEFAULT_MAPPING + "]");
        }

        ParseContext.InternalParseContext context = cache.get();

        final Mapping mapping = docMapper.mapping();

@ -27,6 +27,7 @@ import java.util.Collection;
|
|||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
|
@ -56,7 +57,11 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
     * from the provided fields. If a field already exists, the field type will be updated
     * to use the new mappers field type.
     */
    public FieldTypeLookup copyAndAddAll(Collection<FieldMapper> newFieldMappers) {
    public FieldTypeLookup copyAndAddAll(String type, Collection<FieldMapper> newFieldMappers) {
        Objects.requireNonNull(type, "type must not be null");
        if (MapperService.DEFAULT_MAPPING.equals(type)) {
            throw new IllegalArgumentException("Default mappings should not be added to the lookup");
        }
        CopyOnWriteHashMap<String, MappedFieldTypeReference> fullName = this.fullNameToFieldType;
        CopyOnWriteHashMap<String, MappedFieldTypeReference> indexName = this.indexNameToFieldType;

|
|
|
@ -250,13 +250,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
        DocumentMapper oldMapper = mappers.get(mapper.type());

        if (oldMapper != null) {
            MergeResult result = oldMapper.merge(mapper.mapping(), false, updateAllTypes);
            // simulate first
            MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes);
            if (result.hasConflicts()) {
                // TODO: What should we do???
                if (logger.isDebugEnabled()) {
                    logger.debug("merging mapping for type [{}] resulted in conflicts: [{}]", mapper.type(), Arrays.toString(result.buildConflicts()));
                }
                throw new MergeMappingException(result.buildConflicts());
            }
            // then apply for real
            result = oldMapper.merge(mapper.mapping(), false, updateAllTypes);
            assert result.hasConflicts() == false; // we already simulated
            return oldMapper;
        } else {
            List<ObjectMapper> newObjectMappers = new ArrayList<>();
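The merge now runs twice: a simulation pass that only surfaces conflicts, then the real merge once the simulation comes back clean. A hedged sketch of that check-then-commit shape, with invented names standing in for the mapper types:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class SimulateThenApplySketch {
    private final Map<String, String> mapping = new HashMap<>();

    // First pass: report conflicts without touching the live mapping.
    List<String> simulateMerge(Map<String, String> update) {
        List<String> conflicts = new ArrayList<>();
        for (Map.Entry<String, String> entry : update.entrySet()) {
            String existing = mapping.get(entry.getKey());
            if (existing != null && !existing.equals(entry.getValue())) {
                conflicts.add("field [" + entry.getKey() + "]: [" + existing + "] vs [" + entry.getValue() + "]");
            }
        }
        return conflicts;
    }

    void merge(Map<String, String> update) {
        List<String> conflicts = simulateMerge(update);
        if (!conflicts.isEmpty()) {
            // Fail before any state has been mutated.
            throw new IllegalArgumentException("merge conflicts: " + conflicts);
        }
        mapping.putAll(update); // then apply for real; the simulation guarantees no conflicts
    }

    public static void main(String[] args) {
        SimulateThenApplySketch mappings = new SimulateThenApplySketch();
        mappings.merge(Collections.singletonMap("title", "text"));
        // reports the conflict without changing anything
        System.out.println(mappings.simulateMerge(Collections.singletonMap("title", "keyword")));
    }
}
---------------------------------------------------------------------------
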
|
@ -266,7 +267,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
}
|
||||
MapperUtils.collect(mapper.mapping().root, newObjectMappers, newFieldMappers);
|
||||
checkNewMappersCompatibility(newObjectMappers, newFieldMappers, updateAllTypes);
|
||||
addMappers(newObjectMappers, newFieldMappers);
|
||||
addMappers(mapper.type(), newObjectMappers, newFieldMappers);
|
||||
|
||||
for (DocumentTypeListener typeListener : typeListeners) {
|
||||
typeListener.beforeCreate(mapper);
|
||||
|
@ -317,7 +318,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
fieldTypes.checkCompatibility(newFieldMappers, updateAllTypes);
|
||||
}
|
||||
|
||||
protected void addMappers(Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
|
||||
protected void addMappers(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
|
||||
assert mappingLock.isWriteLockedByCurrentThread();
|
||||
ImmutableOpenMap.Builder<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers);
|
||||
for (ObjectMapper objectMapper : objectMappers) {
|
||||
|
@ -327,7 +328,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
}
|
||||
}
|
||||
this.fullPathObjectMappers = fullPathObjectMappers.build();
|
||||
this.fieldTypes = this.fieldTypes.copyAndAddAll(fieldMappers);
|
||||
this.fieldTypes = this.fieldTypes.copyAndAddAll(type, fieldMappers);
|
||||
}
|
||||
|
||||
public DocumentMapper parse(String mappingType, CompressedXContent mappingSource, boolean applyDefault) throws MapperParsingException {
|
||||
|
@ -344,10 +345,21 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
        return mappers.containsKey(mappingType);
    }

    /**
     * Return the set of concrete types that have a mapping.
     * NOTE: this does not return the default mapping.
     */
    public Collection<String> types() {
        return mappers.keySet();
        final Set<String> types = new HashSet<>(mappers.keySet());
        types.remove(DEFAULT_MAPPING);
        return Collections.unmodifiableSet(types);
    }

    /**
     * Return the {@link DocumentMapper} for the given type. By using the special
     * {@value #DEFAULT_MAPPING} type, you can get a {@link DocumentMapper} for
     * the default mapping.
     */
    public DocumentMapper documentMapper(String type) {
        return mappers.get(type);
    }

@ -319,6 +319,9 @@ public class TypeParsers {
|
|||
|
||||
for (Map.Entry<String, Object> multiFieldEntry : multiFieldsPropNodes.entrySet()) {
|
||||
String multiFieldName = multiFieldEntry.getKey();
|
||||
if (multiFieldName.contains(".")) {
|
||||
throw new MapperParsingException("Field name [" + multiFieldName + "] which is a multi field of [" + name + "] cannot contain '.'");
|
||||
}
|
||||
if (!(multiFieldEntry.getValue() instanceof Map)) {
|
||||
throw new MapperParsingException("illegal field [" + multiFieldName + "], only fields can be specified inside fields");
|
||||
}
|
||||
|
|
|
@ -128,7 +128,11 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
        if (unit == null) {
            throw new IllegalArgumentException("distance unit must not be null");
        }
        this.distance = DistanceUnit.parse(distance, unit, DistanceUnit.DEFAULT);
        double newDistance = DistanceUnit.parse(distance, unit, DistanceUnit.DEFAULT);
        if (newDistance <= 0.0) {
            throw new IllegalArgumentException("distance must be greater than zero");
        }
        this.distance = newDistance;
        return this;
    }

@ -172,7 +176,7 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
     **/
    public GeoDistanceQueryBuilder optimizeBbox(String optimizeBbox) {
        if (optimizeBbox == null) {
            throw new IllegalArgumentException("optimizeBox must not be null");
            throw new IllegalArgumentException("optimizeBbox must not be null");
        }
        switch (optimizeBbox) {
            case "none":
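The distance setter now parses the value first and assigns it only after checking it is strictly positive, so a bad request fails while the query is being built. A stand-alone sketch of that parse-then-validate step; the unit handling below is deliberately simplified, whereas the real code delegates to DistanceUnit.parse:

---------------------------------------------------------------------------
class DistanceValidationSketch {
    // Parses "5km" / "500m" style strings into meters; simplified stand-in for DistanceUnit.parse.
    static double parseMeters(String distance) {
        String trimmed = distance.trim().toLowerCase();
        if (trimmed.endsWith("km")) {
            return Double.parseDouble(trimmed.substring(0, trimmed.length() - 2)) * 1000.0;
        }
        if (trimmed.endsWith("m")) {
            return Double.parseDouble(trimmed.substring(0, trimmed.length() - 1));
        }
        return Double.parseDouble(trimmed); // assume meters when no unit is given
    }

    private double distanceInMeters;

    DistanceValidationSketch distance(String distance) {
        double parsed = parseMeters(distance);
        if (parsed <= 0.0) {
            // reject before mutating the builder, mirroring the new check above
            throw new IllegalArgumentException("distance must be greater than zero");
        }
        this.distanceInMeters = parsed;
        return this;
    }

    public static void main(String[] args) {
        new DistanceValidationSketch().distance("5km");   // ok
        try {
            new DistanceValidationSketch().distance("-1km");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());           // distance must be greater than zero
        }
    }
}
---------------------------------------------------------------------------
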
|
|
|
@ -408,7 +408,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
|
|||
try {
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder();
|
||||
builder.startObject();
|
||||
highlightBuilder.innerXContent(builder, EMPTY_PARAMS);
|
||||
highlightBuilder.innerXContent(builder);
|
||||
builder.endObject();
|
||||
this.highlightBuilder = builder.bytes();
|
||||
return this;
|
||||
|
|
|
@ -30,12 +30,12 @@ import org.elasticsearch.common.bytes.BytesReference;
|
|||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.text.StringAndBytesText;
|
||||
import org.elasticsearch.common.text.Text;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
import org.elasticsearch.index.fieldvisitor.AllFieldsVisitor;
|
||||
import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor;
|
||||
import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
|
@ -55,13 +55,7 @@ import org.elasticsearch.search.internal.SearchContext;
|
|||
import org.elasticsearch.search.lookup.SourceLookup;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.*;
|
||||
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.contentBuilder;
|
||||
|
@ -98,9 +92,7 @@ public class FetchPhase implements SearchPhase {
|
|||
public void execute(SearchContext context) {
|
||||
FieldsVisitor fieldsVisitor;
|
||||
Set<String> fieldNames = null;
|
||||
List<String> extractFieldNames = null;
|
||||
|
||||
boolean loadAllStored = false;
|
||||
List<String> fieldNamePatterns = null;
|
||||
if (!context.hasFieldNames()) {
|
||||
// no fields specified, default to return source if no explicit indication
|
||||
if (!context.hasScriptFields() && !context.hasFetchSourceContext()) {
|
||||
|
@ -111,10 +103,6 @@ public class FetchPhase implements SearchPhase {
|
|||
fieldsVisitor = new FieldsVisitor(context.sourceRequested());
|
||||
} else {
|
||||
for (String fieldName : context.fieldNames()) {
|
||||
if (fieldName.equals("*")) {
|
||||
loadAllStored = true;
|
||||
continue;
|
||||
}
|
||||
if (fieldName.equals(SourceFieldMapper.NAME)) {
|
||||
if (context.hasFetchSourceContext()) {
|
||||
context.fetchSourceContext().fetchSource(true);
|
||||
|
@ -123,32 +111,28 @@ public class FetchPhase implements SearchPhase {
|
|||
}
|
||||
continue;
|
||||
}
|
||||
MappedFieldType fieldType = context.smartNameFieldType(fieldName);
|
||||
if (fieldType == null) {
|
||||
// Only fail if we know it is a object field, missing paths / fields shouldn't fail.
|
||||
if (context.getObjectMapper(fieldName) != null) {
|
||||
throw new IllegalArgumentException("field [" + fieldName + "] isn't a leaf field");
|
||||
if (Regex.isSimpleMatchPattern(fieldName)) {
|
||||
if (fieldNamePatterns == null) {
|
||||
fieldNamePatterns = new ArrayList<>();
|
||||
}
|
||||
fieldNamePatterns.add(fieldName);
|
||||
} else {
|
||||
MappedFieldType fieldType = context.smartNameFieldType(fieldName);
|
||||
if (fieldType == null) {
|
||||
// Only fail if we know it is a object field, missing paths / fields shouldn't fail.
|
||||
if (context.getObjectMapper(fieldName) != null) {
|
||||
throw new IllegalArgumentException("field [" + fieldName + "] isn't a leaf field");
|
||||
}
|
||||
}
|
||||
} else if (fieldType.stored()) {
|
||||
if (fieldNames == null) {
|
||||
fieldNames = new HashSet<>();
|
||||
}
|
||||
fieldNames.add(fieldType.names().indexName());
|
||||
} else {
|
||||
if (extractFieldNames == null) {
|
||||
extractFieldNames = new ArrayList<>();
|
||||
}
|
||||
extractFieldNames.add(fieldName);
|
||||
fieldNames.add(fieldName);
|
||||
}
|
||||
}
|
||||
if (loadAllStored) {
|
||||
fieldsVisitor = new AllFieldsVisitor(); // load everything, including _source
|
||||
} else if (fieldNames != null) {
|
||||
boolean loadSource = extractFieldNames != null || context.sourceRequested();
|
||||
fieldsVisitor = new CustomFieldsVisitor(fieldNames, loadSource);
|
||||
} else {
|
||||
fieldsVisitor = new FieldsVisitor(extractFieldNames != null || context.sourceRequested());
|
||||
}
|
||||
boolean loadSource = context.sourceRequested();
|
||||
fieldsVisitor = new CustomFieldsVisitor(fieldNames == null ? Collections.emptySet() : fieldNames,
|
||||
fieldNamePatterns == null ? Collections.emptyList() : fieldNamePatterns, loadSource);
|
||||
}
|
||||
|
||||
InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()];
|
||||
|
@ -163,9 +147,9 @@ public class FetchPhase implements SearchPhase {
|
|||
try {
|
||||
int rootDocId = findRootDocumentIfNested(context, subReaderContext, subDocId);
|
||||
if (rootDocId != -1) {
|
||||
searchHit = createNestedSearchHit(context, docId, subDocId, rootDocId, extractFieldNames, loadAllStored, fieldNames, subReaderContext);
|
||||
searchHit = createNestedSearchHit(context, docId, subDocId, rootDocId, fieldNames, fieldNamePatterns, subReaderContext);
|
||||
} else {
|
||||
searchHit = createSearchHit(context, fieldsVisitor, docId, subDocId, extractFieldNames, subReaderContext);
|
||||
searchHit = createSearchHit(context, fieldsVisitor, docId, subDocId, subReaderContext);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw ExceptionsHelper.convertToElastic(e);
|
||||
|
@ -199,7 +183,7 @@ public class FetchPhase implements SearchPhase {
|
|||
return -1;
|
||||
}
|
||||
|
||||
private InternalSearchHit createSearchHit(SearchContext context, FieldsVisitor fieldsVisitor, int docId, int subDocId, List<String> extractFieldNames, LeafReaderContext subReaderContext) {
|
||||
private InternalSearchHit createSearchHit(SearchContext context, FieldsVisitor fieldsVisitor, int docId, int subDocId, LeafReaderContext subReaderContext) {
|
||||
loadStoredFields(context, subReaderContext, fieldsVisitor, subDocId);
|
||||
fieldsVisitor.postProcess(context.mapperService());
|
||||
|
||||
|
@ -219,45 +203,24 @@ public class FetchPhase implements SearchPhase {
|
|||
typeText = documentMapper.typeText();
|
||||
}
|
||||
InternalSearchHit searchHit = new InternalSearchHit(docId, fieldsVisitor.uid().id(), typeText, searchFields);
|
||||
|
||||
// go over and extract fields that are not mapped / stored
|
||||
// Set _source if requested.
|
||||
SourceLookup sourceLookup = context.lookup().source();
|
||||
sourceLookup.setSegmentAndDocument(subReaderContext, subDocId);
|
||||
if (fieldsVisitor.source() != null) {
|
||||
sourceLookup.setSource(fieldsVisitor.source());
|
||||
}
|
||||
if (extractFieldNames != null) {
|
||||
for (String extractFieldName : extractFieldNames) {
|
||||
List<Object> values = context.lookup().source().extractRawValues(extractFieldName);
|
||||
if (!values.isEmpty()) {
|
||||
if (searchHit.fieldsOrNull() == null) {
|
||||
searchHit.fields(new HashMap<String, SearchHitField>(2));
|
||||
}
|
||||
|
||||
SearchHitField hitField = searchHit.fields().get(extractFieldName);
|
||||
if (hitField == null) {
|
||||
hitField = new InternalSearchHitField(extractFieldName, new ArrayList<>(2));
|
||||
searchHit.fields().put(extractFieldName, hitField);
|
||||
}
|
||||
for (Object value : values) {
|
||||
hitField.values().add(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return searchHit;
|
||||
}
|
||||
|
||||
private InternalSearchHit createNestedSearchHit(SearchContext context, int nestedTopDocId, int nestedSubDocId, int rootSubDocId, List<String> extractFieldNames, boolean loadAllStored, Set<String> fieldNames, LeafReaderContext subReaderContext) throws IOException {
|
||||
private InternalSearchHit createNestedSearchHit(SearchContext context, int nestedTopDocId, int nestedSubDocId, int rootSubDocId, Set<String> fieldNames, List<String> fieldNamePatterns, LeafReaderContext subReaderContext) throws IOException {
|
||||
// Also if highlighting is requested on nested documents we need to fetch the _source from the root document,
|
||||
// otherwise highlighting will attempt to fetch the _source from the nested doc, which will fail,
|
||||
// because the entire _source is only stored with the root document.
|
||||
final FieldsVisitor rootFieldsVisitor = new FieldsVisitor(context.sourceRequested() || extractFieldNames != null || context.highlight() != null);
|
||||
final FieldsVisitor rootFieldsVisitor = new FieldsVisitor(context.sourceRequested() || context.highlight() != null);
|
||||
loadStoredFields(context, subReaderContext, rootFieldsVisitor, rootSubDocId);
|
||||
rootFieldsVisitor.postProcess(context.mapperService());
|
||||
|
||||
Map<String, SearchHitField> searchFields = getSearchFields(context, nestedSubDocId, loadAllStored, fieldNames, subReaderContext);
|
||||
Map<String, SearchHitField> searchFields = getSearchFields(context, nestedSubDocId, fieldNames, fieldNamePatterns, subReaderContext);
|
||||
DocumentMapper documentMapper = context.mapperService().documentMapper(rootFieldsVisitor.uid().type());
|
||||
SourceLookup sourceLookup = context.lookup().source();
|
||||
sourceLookup.setSegmentAndDocument(subReaderContext, nestedSubDocId);
|
||||
|
@ -299,39 +262,14 @@ public class FetchPhase implements SearchPhase {
|
|||
}
|
||||
|
||||
InternalSearchHit searchHit = new InternalSearchHit(nestedTopDocId, rootFieldsVisitor.uid().id(), documentMapper.typeText(), nestedIdentity, searchFields);
|
||||
if (extractFieldNames != null) {
|
||||
for (String extractFieldName : extractFieldNames) {
|
||||
List<Object> values = context.lookup().source().extractRawValues(extractFieldName);
|
||||
if (!values.isEmpty()) {
|
||||
if (searchHit.fieldsOrNull() == null) {
|
||||
searchHit.fields(new HashMap<String, SearchHitField>(2));
|
||||
}
|
||||
|
||||
SearchHitField hitField = searchHit.fields().get(extractFieldName);
|
||||
if (hitField == null) {
|
||||
hitField = new InternalSearchHitField(extractFieldName, new ArrayList<>(2));
|
||||
searchHit.fields().put(extractFieldName, hitField);
|
||||
}
|
||||
for (Object value : values) {
|
||||
hitField.values().add(value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return searchHit;
|
||||
}
|
||||
|
||||
private Map<String, SearchHitField> getSearchFields(SearchContext context, int nestedSubDocId, boolean loadAllStored, Set<String> fieldNames, LeafReaderContext subReaderContext) {
|
||||
private Map<String, SearchHitField> getSearchFields(SearchContext context, int nestedSubDocId, Set<String> fieldNames, List<String> fieldNamePatterns, LeafReaderContext subReaderContext) {
|
||||
Map<String, SearchHitField> searchFields = null;
|
||||
if (context.hasFieldNames() && !context.fieldNames().isEmpty()) {
|
||||
FieldsVisitor nestedFieldsVisitor = null;
|
||||
if (loadAllStored) {
|
||||
nestedFieldsVisitor = new AllFieldsVisitor();
|
||||
} else if (fieldNames != null) {
|
||||
nestedFieldsVisitor = new CustomFieldsVisitor(fieldNames, false);
|
||||
}
|
||||
|
||||
FieldsVisitor nestedFieldsVisitor = new CustomFieldsVisitor(fieldNames == null ? Collections.emptySet() : fieldNames,
|
||||
fieldNamePatterns == null ? Collections.emptyList() : fieldNamePatterns, false);
|
||||
if (nestedFieldsVisitor != null) {
|
||||
loadStoredFields(context, subReaderContext, nestedFieldsVisitor, nestedSubDocId);
|
||||
nestedFieldsVisitor.postProcess(context.mapperService());
|
||||
|
|
|
@ -0,0 +1,509 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.highlight;
|
||||
|
||||
import org.apache.lucene.search.highlight.SimpleFragmenter;
|
||||
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* This abstract class holds parameters shared by {@link HighlightBuilder} and {@link HighlightBuilder.Field}
|
||||
* and provides the common setters, equality, hashCode calculation and common serialization
|
||||
*/
|
||||
public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterBuilder> {
|
||||
|
||||
protected String[] preTags;
|
||||
|
||||
protected String[] postTags;
|
||||
|
||||
protected Integer fragmentSize;
|
||||
|
||||
protected Integer numOfFragments;
|
||||
|
||||
protected String highlighterType;
|
||||
|
||||
protected String fragmenter;
|
||||
|
||||
protected QueryBuilder highlightQuery;
|
||||
|
||||
protected String order;
|
||||
|
||||
protected Boolean highlightFilter;
|
||||
|
||||
protected Boolean forceSource;
|
||||
|
||||
protected Integer boundaryMaxScan;
|
||||
|
||||
protected char[] boundaryChars;
|
||||
|
||||
protected Integer noMatchSize;
|
||||
|
||||
protected Integer phraseLimit;
|
||||
|
||||
protected Map<String, Object> options;
|
||||
|
||||
protected Boolean requireFieldMatch;
|
||||
|
||||
/**
|
||||
* Set the pre tags that will be used for highlighting.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB preTags(String... preTags) {
|
||||
this.preTags = preTags;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #preTags(String...)}
|
||||
*/
|
||||
public String[] preTags() {
|
||||
return this.preTags;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the post tags that will be used for highlighting.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB postTags(String... postTags) {
|
||||
this.postTags = postTags;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #postTags(String...)}
|
||||
*/
|
||||
public String[] postTags() {
|
||||
return this.postTags;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the fragment size in characters, defaults to {@link HighlighterParseElement#DEFAULT_FRAGMENT_CHAR_SIZE}
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB fragmentSize(Integer fragmentSize) {
|
||||
this.fragmentSize = fragmentSize;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #fragmentSize(Integer)}
|
||||
*/
|
||||
public Integer fragmentSize() {
|
||||
return this.fragmentSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the number of fragments, defaults to {@link HighlighterParseElement#DEFAULT_NUMBER_OF_FRAGMENTS}
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB numOfFragments(Integer numOfFragments) {
|
||||
this.numOfFragments = numOfFragments;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #numOfFragments(Integer)}
|
||||
*/
|
||||
public Integer numOfFragments() {
|
||||
return this.numOfFragments;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set type of highlighter to use. Out of the box supported types
|
||||
* are <tt>plain</tt>, <tt>fvh</tt> and <tt>postings</tt>.
|
||||
* The default option selected is dependent on the mappings defined for your index.
|
||||
* Details of the different highlighter types are covered in the reference guide.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB highlighterType(String highlighterType) {
|
||||
this.highlighterType = highlighterType;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #highlighterType(String)}
|
||||
*/
|
||||
public String highlighterType() {
|
||||
return this.highlighterType;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets what fragmenter to use to break up text that is eligible for highlighting.
|
||||
* This option is only applicable when using the plain highlighterType <tt>highlighter</tt>.
|
||||
* Permitted values are "simple" or "span" relating to {@link SimpleFragmenter} and
|
||||
* {@link SimpleSpanFragmenter} implementations respectively with the default being "span"
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB fragmenter(String fragmenter) {
|
||||
this.fragmenter = fragmenter;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #fragmenter(String)}
|
||||
*/
|
||||
public String fragmenter() {
|
||||
return this.fragmenter;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets a query to be used for highlighting instead of the search query.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB highlightQuery(QueryBuilder highlightQuery) {
|
||||
this.highlightQuery = highlightQuery;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #highlightQuery(QueryBuilder)}
|
||||
*/
|
||||
public QueryBuilder highlightQuery() {
|
||||
return this.highlightQuery;
|
||||
}
|
||||
|
||||
/**
|
||||
* The order of fragments per field. By default, ordered by the order in the
|
||||
* highlighted text. Can be <tt>score</tt>, which then it will be ordered
|
||||
* by score of the fragments.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB order(String order) {
|
||||
this.order = order;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #order(String)}
|
||||
*/
|
||||
public String order() {
|
||||
return this.order;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set this to true when using the highlighterType <tt>fvh</tt>
|
||||
* and you want to provide highlighting on filter clauses in your
|
||||
* query. Default is <tt>false</tt>.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB highlightFilter(Boolean highlightFilter) {
|
||||
this.highlightFilter = highlightFilter;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #highlightFilter(Boolean)}
|
||||
*/
|
||||
public Boolean highlightFilter() {
|
||||
return this.highlightFilter;
|
||||
}
|
||||
|
||||
/**
|
||||
* When using the highlighterType <tt>fvh</tt> this setting
|
||||
* controls how far to look for boundary characters, and defaults to 20.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB boundaryMaxScan(Integer boundaryMaxScan) {
|
||||
this.boundaryMaxScan = boundaryMaxScan;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #boundaryMaxScan(Integer)}
|
||||
*/
|
||||
public Integer boundaryMaxScan() {
|
||||
return this.boundaryMaxScan;
|
||||
}
|
||||
|
||||
/**
|
||||
* When using the highlighterType <tt>fvh</tt> this setting
|
||||
* defines what constitutes a boundary for highlighting. It’s a single string with
|
||||
* each boundary character defined in it. It defaults to .,!? \t\n
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB boundaryChars(char[] boundaryChars) {
|
||||
this.boundaryChars = boundaryChars;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #boundaryChars(char[])}
|
||||
*/
|
||||
public char[] boundaryChars() {
|
||||
return this.boundaryChars;
|
||||
}
|
||||
|
||||
/**
|
||||
* Allows to set custom options for custom highlighters.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB options(Map<String, Object> options) {
|
||||
this.options = options;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #options(Map)}
|
||||
*/
|
||||
public Map<String, Object> options() {
|
||||
return this.options;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set to true to cause a field to be highlighted only if a query matches that field.
|
||||
* Default is false meaning that terms are highlighted on all requested fields regardless
|
||||
* if the query matches specifically on them.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB requireFieldMatch(Boolean requireFieldMatch) {
|
||||
this.requireFieldMatch = requireFieldMatch;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #requireFieldMatch(Boolean)}
|
||||
*/
|
||||
public Boolean requireFieldMatch() {
|
||||
return this.requireFieldMatch;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the size of the fragment to return from the beginning of the field if there are no matches to
|
||||
* highlight and the field doesn't also define noMatchSize.
|
||||
* @param noMatchSize integer to set or null to leave out of request. default is null.
|
||||
* @return this for chaining
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB noMatchSize(Integer noMatchSize) {
|
||||
this.noMatchSize = noMatchSize;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #noMatchSize(Integer)}
|
||||
*/
|
||||
public Integer noMatchSize() {
|
||||
return this.noMatchSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the maximum number of phrases the fvh will consider if the field doesn't also define phraseLimit.
|
||||
* @param phraseLimit maximum number of phrases the fvh will consider
|
||||
* @return this for chaining
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB phraseLimit(Integer phraseLimit) {
|
||||
this.phraseLimit = phraseLimit;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #phraseLimit(Integer)}
|
||||
*/
|
||||
public Integer phraseLimit() {
|
||||
return this.phraseLimit;
|
||||
}
|
||||
|
||||
/**
|
||||
* Forces the highlighting to highlight fields based on the source even if fields are stored separately.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public HB forceSource(Boolean forceSource) {
|
||||
this.forceSource = forceSource;
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the value set by {@link #forceSource(Boolean)}
|
||||
*/
|
||||
public Boolean forceSource() {
|
||||
return this.forceSource;
|
||||
}
|
||||
|
||||
void commonOptionsToXContent(XContentBuilder builder) throws IOException {
|
||||
if (preTags != null) {
|
||||
builder.array("pre_tags", preTags);
|
||||
}
|
||||
if (postTags != null) {
|
||||
builder.array("post_tags", postTags);
|
||||
}
|
||||
if (fragmentSize != null) {
|
||||
builder.field("fragment_size", fragmentSize);
|
||||
}
|
||||
if (numOfFragments != null) {
|
||||
builder.field("number_of_fragments", numOfFragments);
|
||||
}
|
||||
if (highlighterType != null) {
|
||||
builder.field("type", highlighterType);
|
||||
}
|
||||
if (fragmenter != null) {
|
||||
builder.field("fragmenter", fragmenter);
|
||||
}
|
||||
if (highlightQuery != null) {
|
||||
builder.field("highlight_query", highlightQuery);
|
||||
}
|
||||
if (order != null) {
|
||||
builder.field("order", order);
|
||||
}
|
||||
if (highlightFilter != null) {
|
||||
builder.field("highlight_filter", highlightFilter);
|
||||
}
|
||||
if (boundaryMaxScan != null) {
|
||||
builder.field("boundary_max_scan", boundaryMaxScan);
|
||||
}
|
||||
if (boundaryChars != null) {
|
||||
builder.field("boundary_chars", boundaryChars);
|
||||
}
|
||||
if (options != null && options.size() > 0) {
|
||||
builder.field("options", options);
|
||||
}
|
||||
if (forceSource != null) {
|
||||
builder.field("force_source", forceSource);
|
||||
}
|
||||
if (requireFieldMatch != null) {
|
||||
builder.field("require_field_match", requireFieldMatch);
|
||||
}
|
||||
if (noMatchSize != null) {
|
||||
builder.field("no_match_size", noMatchSize);
|
||||
}
|
||||
if (phraseLimit != null) {
|
||||
builder.field("phrase_limit", phraseLimit);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public final int hashCode() {
|
||||
return Objects.hash(getClass(), Arrays.hashCode(preTags), Arrays.hashCode(postTags), fragmentSize,
|
||||
numOfFragments, highlighterType, fragmenter, highlightQuery, order, highlightFilter,
|
||||
forceSource, boundaryMaxScan, Arrays.hashCode(boundaryChars), noMatchSize,
|
||||
phraseLimit, options, requireFieldMatch, doHashCode());
|
||||
}
|
||||
|
||||
/**
|
||||
* internal hashCode calculation to overwrite for the implementing classes.
|
||||
*/
|
||||
protected abstract int doHashCode();
|
||||
|
||||
@Override
|
||||
public final boolean equals(Object obj) {
|
||||
if (this == obj) {
|
||||
return true;
|
||||
}
|
||||
if (obj == null || getClass() != obj.getClass()) {
|
||||
return false;
|
||||
}
|
||||
@SuppressWarnings("unchecked")
|
||||
HB other = (HB) obj;
|
||||
return Arrays.equals(preTags, other.preTags) &&
|
||||
Arrays.equals(postTags, other.postTags) &&
|
||||
Objects.equals(fragmentSize, other.fragmentSize) &&
|
||||
Objects.equals(numOfFragments, other.numOfFragments) &&
|
||||
Objects.equals(highlighterType, other.highlighterType) &&
|
||||
Objects.equals(fragmenter, other.fragmenter) &&
|
||||
Objects.equals(highlightQuery, other.highlightQuery) &&
|
||||
Objects.equals(order, other.order) &&
|
||||
Objects.equals(highlightFilter, other.highlightFilter) &&
|
||||
Objects.equals(forceSource, other.forceSource) &&
|
||||
Objects.equals(boundaryMaxScan, other.boundaryMaxScan) &&
|
||||
Arrays.equals(boundaryChars, other.boundaryChars) &&
|
||||
Objects.equals(noMatchSize, other.noMatchSize) &&
|
||||
Objects.equals(phraseLimit, other.phraseLimit) &&
|
||||
Objects.equals(options, other.options) &&
|
||||
Objects.equals(requireFieldMatch, other.requireFieldMatch) &&
|
||||
doEquals(other);
|
||||
}
|
||||
|
||||
/**
|
||||
* internal equals to overwrite for the implementing classes.
|
||||
*/
|
||||
protected abstract boolean doEquals(HB other);
|
||||
|
||||
/**
|
||||
* read common parameters from {@link StreamInput}
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
protected HB readOptionsFrom(StreamInput in) throws IOException {
|
||||
preTags(in.readOptionalStringArray());
|
||||
postTags(in.readOptionalStringArray());
|
||||
fragmentSize(in.readOptionalVInt());
|
||||
numOfFragments(in.readOptionalVInt());
|
||||
highlighterType(in.readOptionalString());
|
||||
fragmenter(in.readOptionalString());
|
||||
if (in.readBoolean()) {
|
||||
highlightQuery(in.readQuery());
|
||||
}
|
||||
order(in.readOptionalString());
|
||||
highlightFilter(in.readOptionalBoolean());
|
||||
forceSource(in.readOptionalBoolean());
|
||||
boundaryMaxScan(in.readOptionalVInt());
|
||||
if (in.readBoolean()) {
|
||||
boundaryChars(in.readString().toCharArray());
|
||||
}
|
||||
noMatchSize(in.readOptionalVInt());
|
||||
phraseLimit(in.readOptionalVInt());
|
||||
if (in.readBoolean()) {
|
||||
options(in.readMap());
|
||||
}
|
||||
requireFieldMatch(in.readOptionalBoolean());
|
||||
return (HB) this;
|
||||
}
|
||||
|
||||
/**
|
||||
* write common parameters to {@link StreamOutput}
|
||||
*/
|
||||
protected void writeOptionsTo(StreamOutput out) throws IOException {
|
||||
out.writeOptionalStringArray(preTags);
|
||||
out.writeOptionalStringArray(postTags);
|
||||
out.writeOptionalVInt(fragmentSize);
|
||||
out.writeOptionalVInt(numOfFragments);
|
||||
out.writeOptionalString(highlighterType);
|
||||
out.writeOptionalString(fragmenter);
|
||||
boolean hasQuery = highlightQuery != null;
|
||||
out.writeBoolean(hasQuery);
|
||||
if (hasQuery) {
|
||||
out.writeQuery(highlightQuery);
|
||||
}
|
||||
out.writeOptionalString(order);
|
||||
out.writeOptionalBoolean(highlightFilter);
|
||||
out.writeOptionalBoolean(forceSource);
|
||||
out.writeOptionalVInt(boundaryMaxScan);
|
||||
boolean hasBounaryChars = boundaryChars != null;
|
||||
out.writeBoolean(hasBounaryChars);
|
||||
if (hasBounaryChars) {
|
||||
out.writeString(String.valueOf(boundaryChars));
|
||||
}
|
||||
out.writeOptionalVInt(noMatchSize);
|
||||
out.writeOptionalVInt(phraseLimit);
|
||||
boolean hasOptions = options != null;
|
||||
out.writeBoolean(hasOptions);
|
||||
if (hasOptions) {
|
||||
out.writeMap(options);
|
||||
}
|
||||
out.writeOptionalBoolean(requireFieldMatch);
|
||||
}
|
||||
}
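// Illustrative sketch only (not part of the original change): the final equals()/hashCode() template above
// compares the common highlighter options and then delegates to doEquals()/doHashCode() for subclass state.
// "ExampleHighlighterBuilder" and its "customOption" field are hypothetical names used purely to show the
// expected shape of a concrete subclass:
//
//     class ExampleHighlighterBuilder extends AbstractHighlighterBuilder<ExampleHighlighterBuilder> {
//         private String customOption;
//
//         @Override
//         protected int doHashCode() {
//             // hash only the state that the base class does not already cover
//             return Objects.hash(customOption);
//         }
//
//         @Override
//         protected boolean doEquals(ExampleHighlighterBuilder other) {
//             // getClass() and the common options were already checked by the base class
//             return Objects.equals(customOption, other.customOption);
//         }
//     }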
@@ -19,16 +19,19 @@
package org.elasticsearch.search.highlight;
|
||||
|
||||
import org.apache.lucene.search.highlight.SimpleFragmenter;
|
||||
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A builder for search highlighting. Settings can control how large fields
|
||||
@@ -36,46 +39,14 @@ import java.util.Map;
*
|
||||
* @see org.elasticsearch.search.builder.SearchSourceBuilder#highlight()
|
||||
*/
|
||||
public class HighlightBuilder implements ToXContent {
|
||||
public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilder> implements Writeable<HighlightBuilder>, ToXContent {
|
||||
|
||||
private List<Field> fields;
|
||||
public static final HighlightBuilder PROTOTYPE = new HighlightBuilder();
|
||||
|
||||
private String tagsSchema;
|
||||
|
||||
private Boolean highlightFilter;
|
||||
|
||||
private Integer fragmentSize;
|
||||
|
||||
private Integer numOfFragments;
|
||||
|
||||
private String[] preTags;
|
||||
|
||||
private String[] postTags;
|
||||
|
||||
private String order;
|
||||
private final List<Field> fields = new ArrayList<>();
|
||||
|
||||
private String encoder;
|
||||
|
||||
private Boolean requireFieldMatch;
|
||||
|
||||
private Integer boundaryMaxScan;
|
||||
|
||||
private char[] boundaryChars;
|
||||
|
||||
private String highlighterType;
|
||||
|
||||
private String fragmenter;
|
||||
|
||||
private QueryBuilder highlightQuery;
|
||||
|
||||
private Integer noMatchSize;
|
||||
|
||||
private Integer phraseLimit;
|
||||
|
||||
private Map<String, Object> options;
|
||||
|
||||
private Boolean forceSource;
|
||||
|
||||
private boolean useExplicitFieldOrder = false;
|
||||
|
||||
/**
|
||||
@@ -85,14 +56,9 @@ public class HighlightBuilder implements ToXContent {
* @param name The field to highlight
|
||||
*/
|
||||
public HighlightBuilder field(String name) {
|
||||
if (fields == null) {
|
||||
fields = new ArrayList<>();
|
||||
}
|
||||
fields.add(new Field(name));
|
||||
return this;
|
||||
return field(new Field(name));
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Adds a field to be highlighted with a provided fragment size (in characters), and
|
||||
* default number of fragments of 5.
|
||||
@@ -101,11 +67,7 @@ public class HighlightBuilder implements ToXContent {
* @param fragmentSize The size of a fragment in characters
|
||||
*/
|
||||
public HighlightBuilder field(String name, int fragmentSize) {
|
||||
if (fields == null) {
|
||||
fields = new ArrayList<>();
|
||||
}
|
||||
fields.add(new Field(name).fragmentSize(fragmentSize));
|
||||
return this;
|
||||
return field(new Field(name).fragmentSize(fragmentSize));
|
||||
}
|
||||
|
||||
|
||||
@@ -118,14 +80,9 @@ public class HighlightBuilder implements ToXContent {
* @param numberOfFragments The (maximum) number of fragments
|
||||
*/
|
||||
public HighlightBuilder field(String name, int fragmentSize, int numberOfFragments) {
|
||||
if (fields == null) {
|
||||
fields = new ArrayList<>();
|
||||
}
|
||||
fields.add(new Field(name).fragmentSize(fragmentSize).numOfFragments(numberOfFragments));
|
||||
return this;
|
||||
return field(new Field(name).fragmentSize(fragmentSize).numOfFragments(numberOfFragments));
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Adds a field to be highlighted with a provided fragment size (in characters), and
|
||||
* a provided (maximum) number of fragments.
|
||||
@@ -136,56 +93,38 @@ public class HighlightBuilder implements ToXContent {
* @param fragmentOffset The offset from the start of the fragment to the start of the highlight
|
||||
*/
|
||||
public HighlightBuilder field(String name, int fragmentSize, int numberOfFragments, int fragmentOffset) {
|
||||
if (fields == null) {
|
||||
fields = new ArrayList<>();
|
||||
}
|
||||
fields.add(new Field(name).fragmentSize(fragmentSize).numOfFragments(numberOfFragments)
|
||||
return field(new Field(name).fragmentSize(fragmentSize).numOfFragments(numberOfFragments)
|
||||
.fragmentOffset(fragmentOffset));
|
||||
return this;
|
||||
}
|
||||
|
||||
public HighlightBuilder field(Field field) {
|
||||
if (fields == null) {
|
||||
fields = new ArrayList<>();
|
||||
}
|
||||
fields.add(field);
|
||||
return this;
|
||||
}
|
||||
|
||||
public List<Field> fields() {
|
||||
return this.fields;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set a tag scheme that encapsulates a built in pre and post tags. The allows schemes
|
||||
* Set a tag scheme that encapsulates a built in pre and post tags. The allowed schemes
|
||||
* are <tt>styled</tt> and <tt>default</tt>.
|
||||
*
|
||||
* @param schemaName The tag scheme name
|
||||
*/
|
||||
public HighlightBuilder tagsSchema(String schemaName) {
|
||||
this.tagsSchema = schemaName;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set this to true when using the highlighterType <tt>fvh</tt>
|
||||
* and you want to provide highlighting on filter clauses in your
|
||||
* query. Default is <tt>false</tt>.
|
||||
*/
|
||||
public HighlightBuilder highlightFilter(boolean highlightFilter) {
|
||||
this.highlightFilter = highlightFilter;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the size of a fragment in characters (defaults to 100)
|
||||
*/
|
||||
public HighlightBuilder fragmentSize(Integer fragmentSize) {
|
||||
this.fragmentSize = fragmentSize;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the maximum number of fragments returned
|
||||
*/
|
||||
public HighlightBuilder numOfFragments(Integer numOfFragments) {
|
||||
this.numOfFragments = numOfFragments;
|
||||
switch (schemaName) {
|
||||
case "default":
|
||||
preTags(HighlighterParseElement.DEFAULT_PRE_TAGS);
|
||||
postTags(HighlighterParseElement.DEFAULT_POST_TAGS);
|
||||
break;
|
||||
case "styled":
|
||||
preTags(HighlighterParseElement.STYLED_PRE_TAG);
|
||||
postTags(HighlighterParseElement.STYLED_POST_TAGS);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException("Unknown tag schema ["+ schemaName +"]");
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
@@ -201,125 +140,10 @@ public class HighlightBuilder implements ToXContent {
}
|
||||
|
||||
/**
|
||||
* Explicitly set the pre tags that will be used for highlighting.
|
||||
* Getter for {@link #encoder(String)}
|
||||
*/
|
||||
public HighlightBuilder preTags(String... preTags) {
|
||||
this.preTags = preTags;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Explicitly set the post tags that will be used for highlighting.
|
||||
*/
|
||||
public HighlightBuilder postTags(String... postTags) {
|
||||
this.postTags = postTags;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* The order of fragments per field. By default, ordered by the order in the
|
||||
* highlighted text. Can be <tt>score</tt>, which then it will be ordered
|
||||
* by score of the fragments.
|
||||
*/
|
||||
public HighlightBuilder order(String order) {
|
||||
this.order = order;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set to true to cause a field to be highlighted only if a query matches that field.
|
||||
* Default is false meaning that terms are highlighted on all requested fields regardless
|
||||
* if the query matches specifically on them.
|
||||
*/
|
||||
public HighlightBuilder requireFieldMatch(boolean requireFieldMatch) {
|
||||
this.requireFieldMatch = requireFieldMatch;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* When using the highlighterType <tt>fvh</tt> this setting
|
||||
* controls how far to look for boundary characters, and defaults to 20.
|
||||
*/
|
||||
public HighlightBuilder boundaryMaxScan(Integer boundaryMaxScan) {
|
||||
this.boundaryMaxScan = boundaryMaxScan;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* When using the highlighterType <tt>fvh</tt> this setting
|
||||
* defines what constitutes a boundary for highlighting. It’s a single string with
|
||||
* each boundary character defined in it. It defaults to .,!? \t\n
|
||||
*/
|
||||
public HighlightBuilder boundaryChars(char[] boundaryChars) {
|
||||
this.boundaryChars = boundaryChars;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set type of highlighter to use. Out of the box supported types
|
||||
* are <tt>plain</tt>, <tt>fvh</tt> and <tt>postings</tt>.
|
||||
* The default option selected is dependent on the mappings defined for your index.
|
||||
* Details of the different highlighter types are covered in the reference guide.
|
||||
*/
|
||||
public HighlightBuilder highlighterType(String highlighterType) {
|
||||
this.highlighterType = highlighterType;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets what fragmenter to use to break up text that is eligible for highlighting.
|
||||
* This option is only applicable when using the plain highlighterType <tt>highlighter</tt>.
|
||||
* Permitted values are "simple" or "span" relating to {@link SimpleFragmenter} and
|
||||
* {@link SimpleSpanFragmenter} implementations respectively with the default being "span"
|
||||
*/
|
||||
public HighlightBuilder fragmenter(String fragmenter) {
|
||||
this.fragmenter = fragmenter;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets a query to be used for highlighting all fields instead of the search query.
|
||||
*/
|
||||
public HighlightBuilder highlightQuery(QueryBuilder highlightQuery) {
|
||||
this.highlightQuery = highlightQuery;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the size of the fragment to return from the beginning of the field if there are no matches to
|
||||
* highlight and the field doesn't also define noMatchSize.
|
||||
* @param noMatchSize integer to set or null to leave out of request. default is null.
|
||||
* @return this for chaining
|
||||
*/
|
||||
public HighlightBuilder noMatchSize(Integer noMatchSize) {
|
||||
this.noMatchSize = noMatchSize;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the maximum number of phrases the fvh will consider if the field doesn't also define phraseLimit.
|
||||
* @param phraseLimit maximum number of phrases the fvh will consider
|
||||
* @return this for chaining
|
||||
*/
|
||||
public HighlightBuilder phraseLimit(Integer phraseLimit) {
|
||||
this.phraseLimit = phraseLimit;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Allows to set custom options for custom highlighters.
|
||||
*/
|
||||
public HighlightBuilder options(Map<String, Object> options) {
|
||||
this.options = options;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Forces the highlighting to highlight fields based on the source even if fields are stored separately.
|
||||
*/
|
||||
public HighlightBuilder forceSource(boolean forceSource) {
|
||||
this.forceSource = forceSource;
|
||||
return this;
|
||||
public String encoder() {
|
||||
return this.encoder;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -331,71 +155,29 @@ public class HighlightBuilder implements ToXContent {
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets value set with {@link #useExplicitFieldOrder(boolean)}
|
||||
*/
|
||||
public Boolean useExplicitFieldOrder() {
|
||||
return this.useExplicitFieldOrder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject("highlight");
|
||||
innerXContent(builder, params);
|
||||
innerXContent(builder);
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
|
||||
public void innerXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
if (tagsSchema != null) {
|
||||
builder.field("tags_schema", tagsSchema);
|
||||
}
|
||||
if (preTags != null) {
|
||||
builder.array("pre_tags", preTags);
|
||||
}
|
||||
if (postTags != null) {
|
||||
builder.array("post_tags", postTags);
|
||||
}
|
||||
if (order != null) {
|
||||
builder.field("order", order);
|
||||
}
|
||||
if (highlightFilter != null) {
|
||||
builder.field("highlight_filter", highlightFilter);
|
||||
}
|
||||
if (fragmentSize != null) {
|
||||
builder.field("fragment_size", fragmentSize);
|
||||
}
|
||||
if (numOfFragments != null) {
|
||||
builder.field("number_of_fragments", numOfFragments);
|
||||
}
|
||||
public void innerXContent(XContentBuilder builder) throws IOException {
|
||||
// first write common options
|
||||
commonOptionsToXContent(builder);
|
||||
// special options for top-level highlighter
|
||||
if (encoder != null) {
|
||||
builder.field("encoder", encoder);
|
||||
}
|
||||
if (requireFieldMatch != null) {
|
||||
builder.field("require_field_match", requireFieldMatch);
|
||||
}
|
||||
if (boundaryMaxScan != null) {
|
||||
builder.field("boundary_max_scan", boundaryMaxScan);
|
||||
}
|
||||
if (boundaryChars != null) {
|
||||
builder.field("boundary_chars", boundaryChars);
|
||||
}
|
||||
if (highlighterType != null) {
|
||||
builder.field("type", highlighterType);
|
||||
}
|
||||
if (fragmenter != null) {
|
||||
builder.field("fragmenter", fragmenter);
|
||||
}
|
||||
if (highlightQuery != null) {
|
||||
builder.field("highlight_query", highlightQuery);
|
||||
}
|
||||
if (noMatchSize != null) {
|
||||
builder.field("no_match_size", noMatchSize);
|
||||
}
|
||||
if (phraseLimit != null) {
|
||||
builder.field("phrase_limit", phraseLimit);
|
||||
}
|
||||
if (options != null && options.size() > 0) {
|
||||
builder.field("options", options);
|
||||
}
|
||||
if (forceSource != null) {
|
||||
builder.field("force_source", forceSource);
|
||||
}
|
||||
if (fields != null) {
|
||||
if (fields.size() > 0) {
|
||||
if (useExplicitFieldOrder) {
|
||||
builder.startArray("fields");
|
||||
} else {
|
||||
@@ -405,63 +187,7 @@ public class HighlightBuilder implements ToXContent {
if (useExplicitFieldOrder) {
|
||||
builder.startObject();
|
||||
}
|
||||
builder.startObject(field.name());
|
||||
if (field.preTags != null) {
|
||||
builder.field("pre_tags", field.preTags);
|
||||
}
|
||||
if (field.postTags != null) {
|
||||
builder.field("post_tags", field.postTags);
|
||||
}
|
||||
if (field.fragmentSize != -1) {
|
||||
builder.field("fragment_size", field.fragmentSize);
|
||||
}
|
||||
if (field.numOfFragments != -1) {
|
||||
builder.field("number_of_fragments", field.numOfFragments);
|
||||
}
|
||||
if (field.fragmentOffset != -1) {
|
||||
builder.field("fragment_offset", field.fragmentOffset);
|
||||
}
|
||||
if (field.highlightFilter != null) {
|
||||
builder.field("highlight_filter", field.highlightFilter);
|
||||
}
|
||||
if (field.order != null) {
|
||||
builder.field("order", field.order);
|
||||
}
|
||||
if (field.requireFieldMatch != null) {
|
||||
builder.field("require_field_match", field.requireFieldMatch);
|
||||
}
|
||||
if (field.boundaryMaxScan != -1) {
|
||||
builder.field("boundary_max_scan", field.boundaryMaxScan);
|
||||
}
|
||||
if (field.boundaryChars != null) {
|
||||
builder.field("boundary_chars", field.boundaryChars);
|
||||
}
|
||||
if (field.highlighterType != null) {
|
||||
builder.field("type", field.highlighterType);
|
||||
}
|
||||
if (field.fragmenter != null) {
|
||||
builder.field("fragmenter", field.fragmenter);
|
||||
}
|
||||
if (field.highlightQuery != null) {
|
||||
builder.field("highlight_query", field.highlightQuery);
|
||||
}
|
||||
if (field.noMatchSize != null) {
|
||||
builder.field("no_match_size", field.noMatchSize);
|
||||
}
|
||||
if (field.matchedFields != null) {
|
||||
builder.field("matched_fields", field.matchedFields);
|
||||
}
|
||||
if (field.phraseLimit != null) {
|
||||
builder.field("phrase_limit", field.phraseLimit);
|
||||
}
|
||||
if (field.options != null && field.options.size() > 0) {
|
||||
builder.field("options", field.options);
|
||||
}
|
||||
if (field.forceSource != null) {
|
||||
builder.field("force_source", field.forceSource);
|
||||
}
|
||||
|
||||
builder.endObject();
|
||||
field.innerXContent(builder);
|
||||
if (useExplicitFieldOrder) {
|
||||
builder.endObject();
|
||||
}
|
||||
@@ -474,26 +200,62 @@ public class HighlightBuilder implements ToXContent {
}
|
||||
}
|
||||
|
||||
public static class Field {
|
||||
final String name;
|
||||
String[] preTags;
|
||||
String[] postTags;
|
||||
int fragmentSize = -1;
|
||||
@Override
|
||||
public final String toString() {
|
||||
try {
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder();
|
||||
builder.prettyPrint();
|
||||
toXContent(builder, ToXContent.EMPTY_PARAMS);
|
||||
return builder.string();
|
||||
} catch (Exception e) {
|
||||
return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}";
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int doHashCode() {
|
||||
return Objects.hash(encoder, useExplicitFieldOrder, fields);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean doEquals(HighlightBuilder other) {
|
||||
return Objects.equals(encoder, other.encoder) &&
|
||||
Objects.equals(useExplicitFieldOrder, other.useExplicitFieldOrder) &&
|
||||
Objects.equals(fields, other.fields);
|
||||
}
|
||||
|
||||
@Override
|
||||
public HighlightBuilder readFrom(StreamInput in) throws IOException {
|
||||
HighlightBuilder highlightBuilder = new HighlightBuilder();
|
||||
highlightBuilder.readOptionsFrom(in)
|
||||
.encoder(in.readOptionalString())
|
||||
.useExplicitFieldOrder(in.readBoolean());
|
||||
int fields = in.readVInt();
|
||||
for (int i = 0; i < fields; i++) {
|
||||
highlightBuilder.field(Field.PROTOTYPE.readFrom(in));
|
||||
}
|
||||
return highlightBuilder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
writeOptionsTo(out);
|
||||
out.writeOptionalString(encoder);
|
||||
out.writeBoolean(useExplicitFieldOrder);
|
||||
out.writeVInt(fields.size());
|
||||
for (int i = 0; i < fields.size(); i++) {
|
||||
fields.get(i).writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
public static class Field extends AbstractHighlighterBuilder<Field> implements Writeable<Field> {
|
||||
static final Field PROTOTYPE = new Field("_na_");
|
||||
|
||||
private final String name;
|
||||
|
||||
int fragmentOffset = -1;
|
||||
int numOfFragments = -1;
|
||||
Boolean highlightFilter;
|
||||
String order;
|
||||
Boolean requireFieldMatch;
|
||||
int boundaryMaxScan = -1;
|
||||
char[] boundaryChars;
|
||||
String highlighterType;
|
||||
String fragmenter;
|
||||
QueryBuilder highlightQuery;
|
||||
Integer noMatchSize;
|
||||
|
||||
String[] matchedFields;
|
||||
Integer phraseLimit;
|
||||
Map<String, Object> options;
|
||||
Boolean forceSource;
|
||||
|
||||
public Field(String name) {
|
||||
this.name = name;
|
||||
@@ -503,118 +265,11 @@ public class HighlightBuilder implements ToXContent {
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Explicitly set the pre tags for this field that will be used for highlighting.
|
||||
* This overrides global settings set by {@link HighlightBuilder#preTags(String...)}.
|
||||
*/
|
||||
public Field preTags(String... preTags) {
|
||||
this.preTags = preTags;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Explicitly set the post tags for this field that will be used for highlighting.
|
||||
* This overrides global settings set by {@link HighlightBuilder#postTags(String...)}.
|
||||
*/
|
||||
public Field postTags(String... postTags) {
|
||||
this.postTags = postTags;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Field fragmentSize(int fragmentSize) {
|
||||
this.fragmentSize = fragmentSize;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Field fragmentOffset(int fragmentOffset) {
|
||||
this.fragmentOffset = fragmentOffset;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Field numOfFragments(int numOfFragments) {
|
||||
this.numOfFragments = numOfFragments;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Field highlightFilter(boolean highlightFilter) {
|
||||
this.highlightFilter = highlightFilter;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* The order of fragments per field. By default, ordered by the order in the
|
||||
* highlighted text. Can be <tt>score</tt>, which then it will be ordered
|
||||
* by score of the fragments.
|
||||
* This overrides global settings set by {@link HighlightBuilder#order(String)}.
|
||||
*/
|
||||
public Field order(String order) {
|
||||
this.order = order;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Field requireFieldMatch(boolean requireFieldMatch) {
|
||||
this.requireFieldMatch = requireFieldMatch;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Field boundaryMaxScan(int boundaryMaxScan) {
|
||||
this.boundaryMaxScan = boundaryMaxScan;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Field boundaryChars(char[] boundaryChars) {
|
||||
this.boundaryChars = boundaryChars;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set type of highlighter to use. Out of the box supported types
|
||||
* are <tt>plain</tt>, <tt>fvh</tt> and <tt>postings</tt>.
|
||||
* This overrides global settings set by {@link HighlightBuilder#highlighterType(String)}.
|
||||
*/
|
||||
public Field highlighterType(String highlighterType) {
|
||||
this.highlighterType = highlighterType;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets what fragmenter to use to break up text that is eligible for highlighting.
|
||||
* This option is only applicable when using plain / normal highlighter.
|
||||
* This overrides global settings set by {@link HighlightBuilder#fragmenter(String)}.
|
||||
*/
|
||||
public Field fragmenter(String fragmenter) {
|
||||
this.fragmenter = fragmenter;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets a query to use for highlighting this field instead of the search query.
|
||||
*/
|
||||
public Field highlightQuery(QueryBuilder highlightQuery) {
|
||||
this.highlightQuery = highlightQuery;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the size of the fragment to return from the beginning of the field if there are no matches to
|
||||
* highlight.
|
||||
* @param noMatchSize integer to set or null to leave out of request. default is null.
|
||||
* @return this for chaining
|
||||
*/
|
||||
public Field noMatchSize(Integer noMatchSize) {
|
||||
this.noMatchSize = noMatchSize;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Allows to set custom options for custom highlighters.
|
||||
* This overrides global settings set by {@link HighlightBuilder#options(Map)}.
|
||||
*/
|
||||
public Field options(Map<String, Object> options) {
|
||||
this.options = options;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the matched fields to highlight against this field data. Default to null, meaning just
|
||||
* the named field. If you provide a list of fields here then don't forget to include name as
|
||||
@@ -625,24 +280,47 @@ public class HighlightBuilder implements ToXContent {
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the maximum number of phrases the fvh will consider.
|
||||
* @param phraseLimit maximum number of phrases the fvh will consider
|
||||
* @return this for chaining
|
||||
*/
|
||||
public Field phraseLimit(Integer phraseLimit) {
|
||||
this.phraseLimit = phraseLimit;
|
||||
return this;
|
||||
public void innerXContent(XContentBuilder builder) throws IOException {
|
||||
builder.startObject(name);
|
||||
// write common options
|
||||
commonOptionsToXContent(builder);
|
||||
// write special field-highlighter options
|
||||
if (fragmentOffset != -1) {
|
||||
builder.field("fragment_offset", fragmentOffset);
|
||||
}
|
||||
if (matchedFields != null) {
|
||||
builder.field("matched_fields", matchedFields);
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Forces the highlighting to highlight this field based on the source even if this field is stored separately.
|
||||
*/
|
||||
public Field forceSource(boolean forceSource) {
|
||||
this.forceSource = forceSource;
|
||||
return this;
|
||||
@Override
|
||||
protected int doHashCode() {
|
||||
return Objects.hash(name, fragmentOffset, Arrays.hashCode(matchedFields));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean doEquals(Field other) {
|
||||
return Objects.equals(name, other.name) &&
|
||||
Objects.equals(fragmentOffset, other.fragmentOffset) &&
|
||||
Arrays.equals(matchedFields, other.matchedFields);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Field readFrom(StreamInput in) throws IOException {
|
||||
Field field = new Field(in.readString());
|
||||
field.fragmentOffset(in.readVInt());
|
||||
field.matchedFields(in.readOptionalStringArray());
|
||||
field.readOptionsFrom(in);
|
||||
return field;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(name);
|
||||
out.writeVInt(fragmentOffset);
|
||||
out.writeOptionalStringArray(matchedFields);
|
||||
writeOptionsTo(out);
|
||||
}
|
||||
}
|
||||
}
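// A short usage sketch (illustrative, not part of the original change) of the builder API defined above,
// combining top-level options with a per-field override; all methods shown are defined in this class:
//
//     HighlightBuilder highlight = new HighlightBuilder()
//             .preTags("<em>")
//             .postTags("</em>")
//             .order("score")
//             .field("title")                             // highlighted with the global settings
//             .field(new HighlightBuilder.Field("body")   // per-field overrides
//                     .fragmentSize(150)
//                     .numOfFragments(3)
//                     .highlighterType("fvh"));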
@@ -52,16 +52,38 @@ import java.util.Set;
 */
public class HighlighterParseElement implements SearchParseElement {

    private static final String[] DEFAULT_PRE_TAGS = new String[]{"<em>"};
    private static final String[] DEFAULT_POST_TAGS = new String[]{"</em>"};

    private static final String[] STYLED_PRE_TAG = {
    /** default for whether to highlight fields based on the source even if stored separately */
    public static final boolean DEFAULT_FORCE_SOURCE = false;
    /** default for whether a field should be highlighted only if a query matches that field */
    public static final boolean DEFAULT_REQUIRE_FIELD_MATCH = true;
    /** default for whether <tt>fvh</tt> should provide highlighting on filter clauses */
    public static final boolean DEFAULT_HIGHLIGHT_FILTER = false;
    /** default for highlight fragments being ordered by score */
    public static final boolean DEFAULT_SCORE_ORDERED = false;
    /** the default encoder setting */
    public static final String DEFAULT_ENCODER = "default";
    /** default for the maximum number of phrases the fvh will consider */
    public static final int DEFAULT_PHRASE_LIMIT = 256;
    /** default for fragment size when there are no matches */
    public static final int DEFAULT_NO_MATCH_SIZE = 0;
    /** the default number of fragments for highlighting */
    public static final int DEFAULT_NUMBER_OF_FRAGMENTS = 5;
    /** the default number of fragments size in characters */
    public static final int DEFAULT_FRAGMENT_CHAR_SIZE = 100;
    /** the default opening tag */
    public static final String[] DEFAULT_PRE_TAGS = new String[]{"<em>"};
    /** the default closing tag */
    public static final String[] DEFAULT_POST_TAGS = new String[]{"</em>"};

    /** the default opening tags when <tt>tag_schema = "styled"</tt> */
    public static final String[] STYLED_PRE_TAG = {
            "<em class=\"hlt1\">", "<em class=\"hlt2\">", "<em class=\"hlt3\">",
            "<em class=\"hlt4\">", "<em class=\"hlt5\">", "<em class=\"hlt6\">",
            "<em class=\"hlt7\">", "<em class=\"hlt8\">", "<em class=\"hlt9\">",
            "<em class=\"hlt10\">"
    };
    private static final String[] STYLED_POST_TAGS = {"</em>"};
    /** the default closing tags when <tt>tag_schema = "styled"</tt> */
    public static final String[] STYLED_POST_TAGS = {"</em>"};

    @Override
    public void parse(XContentParser parser, SearchContext context) throws Exception {
@@ -78,11 +100,11 @@ public class HighlighterParseElement implements SearchParseElement {
        final List<Tuple<String, SearchContextHighlight.FieldOptions.Builder>> fieldsOptions = new ArrayList<>();

        final SearchContextHighlight.FieldOptions.Builder globalOptionsBuilder = new SearchContextHighlight.FieldOptions.Builder()
                .preTags(DEFAULT_PRE_TAGS).postTags(DEFAULT_POST_TAGS).scoreOrdered(false).highlightFilter(false)
                .requireFieldMatch(true).forceSource(false).fragmentCharSize(100).numberOfFragments(5)
                .encoder("default").boundaryMaxScan(SimpleBoundaryScanner.DEFAULT_MAX_SCAN)
                .preTags(DEFAULT_PRE_TAGS).postTags(DEFAULT_POST_TAGS).scoreOrdered(DEFAULT_SCORE_ORDERED).highlightFilter(DEFAULT_HIGHLIGHT_FILTER)
                .requireFieldMatch(DEFAULT_REQUIRE_FIELD_MATCH).forceSource(DEFAULT_FORCE_SOURCE).fragmentCharSize(DEFAULT_FRAGMENT_CHAR_SIZE).numberOfFragments(DEFAULT_NUMBER_OF_FRAGMENTS)
                .encoder(DEFAULT_ENCODER).boundaryMaxScan(SimpleBoundaryScanner.DEFAULT_MAX_SCAN)
                .boundaryChars(SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS)
                .noMatchSize(0).phraseLimit(256);
                .noMatchSize(DEFAULT_NO_MATCH_SIZE).phraseLimit(DEFAULT_PHRASE_LIMIT);

        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
@@ -21,6 +21,7 @@ package org.elasticsearch.action.bulk;
import org.apache.lucene.util.Constants;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
@@ -36,9 +37,7 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.*;

public class BulkRequestTests extends ESTestCase {
    public void testSimpleBulk1() throws Exception {
@@ -171,4 +170,39 @@
        bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
        assertThat(bulkRequest.numberOfActions(), equalTo(9));
    }

    // issue 7361
    public void testBulkRequestWithRefresh() throws Exception {
        BulkRequest bulkRequest = new BulkRequest();
        // force an "id is missing" validation error
        bulkRequest.add(new DeleteRequest("index", "type", null).refresh(true));
        // force a "type is missing" validation error
        bulkRequest.add(new DeleteRequest("index", null, "id"));
        bulkRequest.add(new DeleteRequest("index", "type", "id").refresh(true));
        bulkRequest.add(new UpdateRequest("index", "type", "id").doc("{}").refresh(true));
        bulkRequest.add(new IndexRequest("index", "type", "id").source("{}").refresh(true));
        ActionRequestValidationException validate = bulkRequest.validate();
        assertThat(validate, notNullValue());
        assertThat(validate.validationErrors(), not(empty()));
        assertThat(validate.validationErrors(), contains(
                "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.",
                "id is missing",
                "type is missing",
                "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.",
                "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead.",
                "Refresh is not supported on an item request, set the refresh flag on the BulkRequest instead."));
    }

    // issue 15120
    public void testBulkNoSource() throws Exception {
        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.add(new UpdateRequest("index", "type", "id"));
        bulkRequest.add(new IndexRequest("index", "type", "id"));
        ActionRequestValidationException validate = bulkRequest.validate();
        assertThat(validate, notNullValue());
        assertThat(validate.validationErrors(), not(empty()));
        assertThat(validate.validationErrors(), contains(
                "script or doc is missing",
                "source is missing"));
    }
}
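// Illustrative sketch, not part of the original change: the validation errors asserted above state that
// per-item refresh is rejected and that the flag belongs on the whole bulk request. Assuming the
// BulkRequest-level refresh(boolean) setter that the error message refers to, the supported pattern is:
//
//     BulkRequest bulkRequest = new BulkRequest();
//     bulkRequest.add(new IndexRequest("index", "type", "id").source("{}"));
//     bulkRequest.add(new DeleteRequest("index", "type", "other-id"));
//     bulkRequest.refresh(true); // refresh once for the whole bulk, not per item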
@@ -43,23 +43,17 @@ import org.elasticsearch.test.MockLogAppender;
import org.elasticsearch.test.junit.annotations.TestLogging;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.*;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -711,32 +705,146 @@ public class ClusterServiceIT extends ESIntegTestCase {
|
|||
.build();
|
||||
internalCluster().startNode(settings);
|
||||
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
|
||||
BlockingTask block = new BlockingTask();
|
||||
clusterService.submitStateUpdateTask("test", Priority.IMMEDIATE, block);
|
||||
BlockingTask block = new BlockingTask(Priority.IMMEDIATE);
|
||||
clusterService.submitStateUpdateTask("test", block);
|
||||
int taskCount = randomIntBetween(5, 20);
|
||||
Priority[] priorities = Priority.values();
|
||||
|
||||
// will hold all the tasks in the order in which they were executed
|
||||
List<PrioritiezedTask> tasks = new ArrayList<>(taskCount);
|
||||
List<PrioritizedTask> tasks = new ArrayList<>(taskCount);
|
||||
CountDownLatch latch = new CountDownLatch(taskCount);
|
||||
for (int i = 0; i < taskCount; i++) {
|
||||
Priority priority = priorities[randomIntBetween(0, priorities.length - 1)];
|
||||
clusterService.submitStateUpdateTask("test", priority, new PrioritiezedTask(priority, latch, tasks));
|
||||
clusterService.submitStateUpdateTask("test", new PrioritizedTask(priority, latch, tasks));
|
||||
}
|
||||
|
||||
block.release();
|
||||
latch.await();
|
||||
|
||||
Priority prevPriority = null;
|
||||
for (PrioritiezedTask task : tasks) {
|
||||
for (PrioritizedTask task : tasks) {
|
||||
if (prevPriority == null) {
|
||||
prevPriority = task.priority;
|
||||
prevPriority = task.priority();
|
||||
} else {
|
||||
assertThat(task.priority.sameOrAfter(prevPriority), is(true));
|
||||
assertThat(task.priority().sameOrAfter(prevPriority), is(true));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testClusterStateBatchedUpdates() throws InterruptedException {
|
||||
Settings settings = settingsBuilder()
|
||||
.put("discovery.type", "local")
|
||||
.build();
|
||||
internalCluster().startNode(settings);
|
||||
ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
|
||||
|
||||
AtomicInteger counter = new AtomicInteger();
|
||||
class Task {
|
||||
private AtomicBoolean state = new AtomicBoolean();
|
||||
|
||||
public void execute() {
|
||||
if (!state.compareAndSet(false, true)) {
|
||||
throw new IllegalStateException();
|
||||
} else {
|
||||
counter.incrementAndGet();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class TaskExecutor implements ClusterStateTaskExecutor<Task> {
|
||||
private AtomicInteger counter = new AtomicInteger();
|
||||
|
||||
@Override
|
||||
public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception {
|
||||
tasks.forEach(task -> task.execute());
|
||||
counter.addAndGet(tasks.size());
|
||||
return BatchResult.<Task>builder().successes(tasks).build(currentState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean runOnlyOnMaster() {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
int numberOfThreads = randomIntBetween(2, 256);
|
||||
int tasksSubmittedPerThread = randomIntBetween(1, 1024);
|
||||
|
||||
ConcurrentMap<String, AtomicInteger> counters = new ConcurrentHashMap<>();
|
||||
CountDownLatch latch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
|
||||
ClusterStateTaskListener listener = new ClusterStateTaskListener() {
|
||||
@Override
|
||||
public void onFailure(String source, Throwable t) {
|
||||
assert false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
|
||||
counters.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet();
|
||||
latch.countDown();
|
||||
}
|
||||
};
|
||||
|
||||
int numberOfExecutors = Math.max(1, numberOfThreads / 4);
|
||||
List<TaskExecutor> executors = new ArrayList<>();
|
||||
for (int i = 0; i < numberOfExecutors; i++) {
|
||||
executors.add(new TaskExecutor());
|
||||
}
|
||||
|
||||
// randomly assign tasks to executors
|
||||
List<TaskExecutor> assignments = new ArrayList<>();
|
||||
for (int i = 0; i < numberOfThreads; i++) {
|
||||
for (int j = 0; j < tasksSubmittedPerThread; j++) {
|
||||
assignments.add(randomFrom(executors));
|
||||
}
|
||||
}
|
||||
|
||||
Map<TaskExecutor, Integer> counts = new HashMap<>();
|
||||
for (TaskExecutor executor : assignments) {
|
||||
counts.merge(executor, 1, (previous, one) -> previous + one);
|
||||
}
|
||||
|
||||
CountDownLatch startingGun = new CountDownLatch(1 + numberOfThreads);
|
||||
List<Thread> threads = new ArrayList<>();
|
||||
for (int i = 0; i < numberOfThreads; i++) {
|
||||
final int index = i;
|
||||
Thread thread = new Thread(() -> {
|
||||
startingGun.countDown();
|
||||
for (int j = 0; j < tasksSubmittedPerThread; j++) {
|
||||
ClusterStateTaskExecutor<Task> executor = assignments.get(index * tasksSubmittedPerThread + j);
|
||||
clusterService.submitStateUpdateTask(
|
||||
Thread.currentThread().getName(),
|
||||
new Task(),
|
||||
ClusterStateTaskConfig.build(Priority.NORMAL),
|
||||
executor,
|
||||
listener);
|
||||
}
|
||||
});
|
||||
threads.add(thread);
|
||||
thread.start();
|
||||
}
|
||||
|
||||
startingGun.countDown();
|
||||
for (Thread thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
|
||||
// wait until all the cluster state updates have been processed
|
||||
latch.await();
|
||||
|
||||
// assert the number of executed tasks is correct
|
||||
assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get());
|
||||
|
||||
// assert each executor executed the correct number of tasks
|
||||
for (TaskExecutor executor : executors) {
|
||||
assertEquals((int)counts.get(executor), executor.counter.get());
|
||||
}
|
||||
|
||||
// assert the correct number of clusterStateProcessed events were triggered
|
||||
for (Map.Entry<String, AtomicInteger> entry : counters.entrySet()) {
|
||||
assertEquals(entry.getValue().get(), tasksSubmittedPerThread);
|
||||
}
|
||||
}
|
||||
|
||||
@TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level
|
||||
public void testClusterStateUpdateLogging() throws Exception {
|
||||
Settings settings = settingsBuilder()
|
||||
|
@ -947,6 +1055,10 @@ public class ClusterServiceIT extends ESIntegTestCase {
|
|||
private static class BlockingTask extends ClusterStateUpdateTask {
|
||||
private final CountDownLatch latch = new CountDownLatch(1);
|
||||
|
||||
public BlockingTask(Priority priority) {
|
||||
super(priority);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
latch.await();
|
||||
|
@ -963,14 +1075,13 @@ public class ClusterServiceIT extends ESIntegTestCase {
|
|||
|
||||
}
|
||||
|
||||
private static class PrioritiezedTask extends ClusterStateUpdateTask {
|
||||
private static class PrioritizedTask extends ClusterStateUpdateTask {
|
||||
|
||||
private final Priority priority;
|
||||
private final CountDownLatch latch;
|
||||
private final List<PrioritiezedTask> tasks;
|
||||
private final List<PrioritizedTask> tasks;
|
||||
|
||||
private PrioritiezedTask(Priority priority, CountDownLatch latch, List<PrioritiezedTask> tasks) {
|
||||
this.priority = priority;
|
||||
private PrioritizedTask(Priority priority, CountDownLatch latch, List<PrioritizedTask> tasks) {
|
||||
super(priority);
|
||||
this.latch = latch;
|
||||
this.tasks = tasks;
|
||||
}
|
@@ -111,7 +111,6 @@ public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> exte
        assertThat("same shape's hashcode returns different values if called multiple times", firstShape.hashCode(),
                equalTo(firstShape.hashCode()));
        assertThat("different shapes should not be equal", mutate(firstShape), not(equalTo(firstShape)));
        assertThat("different shapes should have different hashcode", mutate(firstShape).hashCode(), not(equalTo(firstShape.hashCode())));

        SB secondShape = copyShape(firstShape);
        assertTrue("shape is not equal to self", secondShape.equals(secondShape));
@@ -25,11 +25,7 @@ import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -59,16 +55,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration;
import org.elasticsearch.test.disruption.BlockClusterStateProcessing;
import org.elasticsearch.test.disruption.IntermittentLongGCDisruption;
import org.elasticsearch.test.disruption.LongGCDisruption;
import org.elasticsearch.test.disruption.NetworkDelaysPartition;
import org.elasticsearch.test.disruption.NetworkDisconnectPartition;
import org.elasticsearch.test.disruption.NetworkPartition;
import org.elasticsearch.test.disruption.NetworkUnresponsivePartition;
import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
import org.elasticsearch.test.disruption.SingleNodeDisruption;
import org.elasticsearch.test.disruption.SlowClusterStateProcessing;
import org.elasticsearch.test.disruption.*;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.TransportException;
@@ -78,31 +65,15 @@ import org.elasticsearch.transport.TransportService;
import org.junit.Before;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.*;

@ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0)
@ESIntegTestCase.SuppressLocalMode
@@ -650,7 +621,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
        // but will be queued and, once the old master node un-freezes, it gets executed.
        // The old master node will send this update + the cluster state where it is flagged as master to the other
        // nodes that follow the new master. These nodes should ignore this update.
        internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
        internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
            @Override
            public ClusterState execute(ClusterState currentState) throws Exception {
                return ClusterState.builder(currentState).build();
@@ -91,7 +91,10 @@ public class NettyHttpServerPipeliningTests extends ESTestCase {
    }

    public void testThatHttpPipeliningWorksWhenEnabled() throws Exception {
        Settings settings = settingsBuilder().put("http.pipelining", true).build();
        Settings settings = settingsBuilder()
                .put("http.pipelining", true)
                .put("http.port", "0")
                .build();
        httpServerTransport = new CustomNettyHttpServerTransport(settings);
        httpServerTransport.start();
        InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses());
@@ -105,7 +108,10 @@ public class NettyHttpServerPipeliningTests extends ESTestCase {
    }

    public void testThatHttpPipeliningCanBeDisabled() throws Exception {
        Settings settings = settingsBuilder().put("http.pipelining", false).build();
        Settings settings = settingsBuilder()
                .put("http.pipelining", false)
                .put("http.port", "0")
                .build();
        httpServerTransport = new CustomNettyHttpServerTransport(settings);
        httpServerTransport.start();
        InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) randomFrom(httpServerTransport.boundAddress().boundAddresses());
@@ -76,8 +76,6 @@ public class HttpPipeliningHandlerTests extends ESTestCase {
    private static final long RESPONSE_TIMEOUT = 10000L;
    private static final long CONNECTION_TIMEOUT = 10000L;
    private static final String CONTENT_TYPE_TEXT = "text/plain; charset=UTF-8";
    // TODO make me random
    private static final InetSocketAddress HOST_ADDR = new InetSocketAddress(InetAddress.getLoopbackAddress(), 9080);
    private static final String PATH1 = "/1";
    private static final String PATH2 = "/2";
    private static final String SOME_RESPONSE_TEXT = "some response for ";
@@ -90,6 +88,8 @@ public class HttpPipeliningHandlerTests extends ESTestCase {

    private HashedWheelTimer timer;

    private InetSocketAddress boundAddress;

    @Before
    public void startBootstraps() {
        clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory());
@@ -118,7 +118,8 @@ public class HttpPipeliningHandlerTests extends ESTestCase {
            }
        });

        serverBootstrap.bind(HOST_ADDR);
        Channel channel = serverBootstrap.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
        boundAddress = (InetSocketAddress) channel.getLocalAddress();

        timer = new HashedWheelTimer();
    }
@@ -137,7 +138,7 @@ public class HttpPipeliningHandlerTests extends ESTestCase {
        responsesIn = new CountDownLatch(1);
        responses.clear();

        final ChannelFuture connectionFuture = clientBootstrap.connect(HOST_ADDR);
        final ChannelFuture connectionFuture = clientBootstrap.connect(boundAddress);

        assertTrue(connectionFuture.await(CONNECTION_TIMEOUT));
        final Channel clientChannel = connectionFuture.getChannel();
@@ -145,11 +146,11 @@ public class HttpPipeliningHandlerTests extends ESTestCase {
        // NetworkAddress.formatAddress makes a proper HOST header.
        final HttpRequest request1 = new DefaultHttpRequest(
                HTTP_1_1, HttpMethod.GET, PATH1);
        request1.headers().add(HOST, NetworkAddress.formatAddress(HOST_ADDR));
        request1.headers().add(HOST, NetworkAddress.formatAddress(boundAddress));

        final HttpRequest request2 = new DefaultHttpRequest(
                HTTP_1_1, HttpMethod.GET, PATH2);
        request2.headers().add(HOST, NetworkAddress.formatAddress(HOST_ADDR));
        request2.headers().add(HOST, NetworkAddress.formatAddress(boundAddress));

        clientChannel.write(request1);
        clientChannel.write(request2);
@@ -27,6 +27,7 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
|
||||
|
@ -47,10 +48,20 @@ public class FieldTypeLookupTests extends ESTestCase {
|
|||
assertFalse(itr.hasNext());
|
||||
}
|
||||
|
||||
public void testDefaultMapping() {
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
try {
|
||||
lookup.copyAndAddAll(MapperService.DEFAULT_MAPPING, Collections.emptyList());
|
||||
fail();
|
||||
} catch (IllegalArgumentException expected) {
|
||||
assertEquals("Default mappings should not be added to the lookup", expected.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void testAddNewField() {
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
FakeFieldMapper f = new FakeFieldMapper("foo", "bar");
|
||||
FieldTypeLookup lookup2 = lookup.copyAndAddAll(newList(f));
|
||||
FieldTypeLookup lookup2 = lookup.copyAndAddAll("type", newList(f));
|
||||
assertNull(lookup.get("foo"));
|
||||
assertNull(lookup.get("bar"));
|
||||
assertNull(lookup.getByIndexName("foo"));
|
||||
|
@ -67,8 +78,8 @@ public class FieldTypeLookupTests extends ESTestCase {
|
|||
MappedFieldType originalFieldType = f.fieldType();
|
||||
FakeFieldMapper f2 = new FakeFieldMapper("foo", "foo");
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
lookup = lookup.copyAndAddAll(newList(f));
|
||||
FieldTypeLookup lookup2 = lookup.copyAndAddAll(newList(f2));
|
||||
lookup = lookup.copyAndAddAll("type1", newList(f));
|
||||
FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2));
|
||||
|
||||
assertNotSame(originalFieldType, f.fieldType());
|
||||
assertSame(f.fieldType(), f2.fieldType());
|
||||
|
@ -82,8 +93,8 @@ public class FieldTypeLookupTests extends ESTestCase {
|
|||
FakeFieldMapper f2 = new FakeFieldMapper("bar", "foo");
|
||||
MappedFieldType originalFieldType = f.fieldType();
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
lookup = lookup.copyAndAddAll(newList(f));
|
||||
FieldTypeLookup lookup2 = lookup.copyAndAddAll(newList(f2));
|
||||
lookup = lookup.copyAndAddAll("type1", newList(f));
|
||||
FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2));
|
||||
|
||||
assertNotSame(originalFieldType, f.fieldType());
|
||||
assertSame(f.fieldType(), f2.fieldType());
|
||||
|
@ -98,8 +109,8 @@ public class FieldTypeLookupTests extends ESTestCase {
|
|||
FakeFieldMapper f2 = new FakeFieldMapper("foo", "bar");
|
||||
MappedFieldType originalFieldType = f.fieldType();
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
lookup = lookup.copyAndAddAll(newList(f));
|
||||
FieldTypeLookup lookup2 = lookup.copyAndAddAll(newList(f2));
|
||||
lookup = lookup.copyAndAddAll("type1", newList(f));
|
||||
FieldTypeLookup lookup2 = lookup.copyAndAddAll("type2", newList(f2));
|
||||
|
||||
assertNotSame(originalFieldType, f.fieldType());
|
||||
assertSame(f.fieldType(), f2.fieldType());
|
||||
|
@ -113,18 +124,18 @@ public class FieldTypeLookupTests extends ESTestCase {
|
|||
FakeFieldMapper f = new FakeFieldMapper("foo", "foo");
|
||||
FakeFieldMapper f2 = new FakeFieldMapper("bar", "bar");
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
lookup = lookup.copyAndAddAll(newList(f, f2));
|
||||
lookup = lookup.copyAndAddAll("type1", newList(f, f2));
|
||||
|
||||
try {
|
||||
FakeFieldMapper f3 = new FakeFieldMapper("foo", "bar");
|
||||
lookup.copyAndAddAll(newList(f3));
|
||||
lookup.copyAndAddAll("type2", newList(f3));
|
||||
} catch (IllegalStateException e) {
|
||||
assertTrue(e.getMessage().contains("insane mappings"));
|
||||
}
|
||||
|
||||
try {
|
||||
FakeFieldMapper f3 = new FakeFieldMapper("bar", "foo");
|
||||
lookup.copyAndAddAll(newList(f3));
|
||||
lookup.copyAndAddAll("type2", newList(f3));
|
||||
} catch (IllegalStateException e) {
|
||||
assertTrue(e.getMessage().contains("insane mappings"));
|
||||
}
|
||||
|
@ -139,7 +150,7 @@ public class FieldTypeLookupTests extends ESTestCase {
|
|||
public void testCheckCompatibilityMismatchedTypes() {
|
||||
FieldMapper f1 = new FakeFieldMapper("foo", "bar");
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
lookup = lookup.copyAndAddAll(newList(f1));
|
||||
lookup = lookup.copyAndAddAll("type", newList(f1));
|
||||
|
||||
MappedFieldType ft2 = FakeFieldMapper.makeOtherFieldType("foo", "foo");
|
||||
FieldMapper f2 = new FakeFieldMapper("foo", ft2);
|
||||
|
@ -161,7 +172,7 @@ public class FieldTypeLookupTests extends ESTestCase {
|
|||
public void testCheckCompatibilityConflict() {
|
||||
FieldMapper f1 = new FakeFieldMapper("foo", "bar");
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
lookup = lookup.copyAndAddAll(newList(f1));
|
||||
lookup = lookup.copyAndAddAll("type", newList(f1));
|
||||
|
||||
MappedFieldType ft2 = FakeFieldMapper.makeFieldType("foo", "bar");
|
||||
ft2.setBoost(2.0f);
|
||||
|
@ -196,7 +207,7 @@ public class FieldTypeLookupTests extends ESTestCase {
|
|||
FakeFieldMapper f1 = new FakeFieldMapper("foo", "baz");
|
||||
FakeFieldMapper f2 = new FakeFieldMapper("bar", "boo");
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
lookup = lookup.copyAndAddAll(newList(f1, f2));
|
||||
lookup = lookup.copyAndAddAll("type", newList(f1, f2));
|
||||
Collection<String> names = lookup.simpleMatchToIndexNames("b*");
|
||||
assertTrue(names.contains("baz"));
|
||||
assertTrue(names.contains("boo"));
|
||||
|
@ -206,7 +217,7 @@ public class FieldTypeLookupTests extends ESTestCase {
|
|||
FakeFieldMapper f1 = new FakeFieldMapper("foo", "baz");
|
||||
FakeFieldMapper f2 = new FakeFieldMapper("bar", "boo");
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
lookup = lookup.copyAndAddAll(newList(f1, f2));
|
||||
lookup = lookup.copyAndAddAll("type", newList(f1, f2));
|
||||
Collection<String> names = lookup.simpleMatchToFullName("b*");
|
||||
assertTrue(names.contains("foo"));
|
||||
assertTrue(names.contains("bar"));
|
||||
|
@ -215,7 +226,7 @@ public class FieldTypeLookupTests extends ESTestCase {
|
|||
public void testIteratorImmutable() {
|
||||
FakeFieldMapper f1 = new FakeFieldMapper("foo", "bar");
|
||||
FieldTypeLookup lookup = new FieldTypeLookup();
|
||||
lookup = lookup.copyAndAddAll(newList(f1));
|
||||
lookup = lookup.copyAndAddAll("type", newList(f1));
|
||||
|
||||
try {
|
||||
Iterator<MappedFieldType> itr = lookup.iterator();
|
||||
@ -21,6 +21,8 @@ package org.elasticsearch.index.mapper;
|
|||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.junit.Rule;
|
||||
import org.junit.rules.ExpectedException;
|
||||
|
@ -31,6 +33,11 @@ import static org.elasticsearch.test.VersionUtils.randomVersionBetween;
|
|||
import static org.hamcrest.CoreMatchers.containsString;
|
||||
import static org.hamcrest.Matchers.hasToString;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
||||
public class MapperServiceTests extends ESSingleNodeTestCase {
|
||||
@Rule
|
||||
public ExpectedException expectedException = ExpectedException.none();
|
||||
|
@ -82,4 +89,56 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
|
|||
.execute()
|
||||
.actionGet();
|
||||
}
|
||||
|
||||
public void testTypes() throws Exception {
|
||||
IndexService indexService1 = createIndex("index1");
|
||||
MapperService mapperService = indexService1.mapperService();
|
||||
assertEquals(Collections.emptySet(), mapperService.types());
|
||||
|
||||
mapperService.merge("type1", new CompressedXContent("{\"type1\":{}}"), true, false);
|
||||
assertNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING));
|
||||
assertEquals(Collections.singleton("type1"), mapperService.types());
|
||||
|
||||
mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedXContent("{\"_default_\":{}}"), true, false);
|
||||
assertNotNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING));
|
||||
assertEquals(Collections.singleton("type1"), mapperService.types());
|
||||
|
||||
mapperService.merge("type2", new CompressedXContent("{\"type2\":{}}"), true, false);
|
||||
assertNotNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING));
|
||||
assertEquals(new HashSet<>(Arrays.asList("type1", "type2")), mapperService.types());
|
||||
}
|
||||
|
||||
public void testIndexIntoDefaultMapping() throws Throwable {
|
||||
// 1. test implicit index creation
|
||||
try {
|
||||
client().prepareIndex("index1", MapperService.DEFAULT_MAPPING, "1").setSource("{").execute().get();
|
||||
fail();
|
||||
} catch (Throwable t) {
|
||||
if (t instanceof ExecutionException) {
|
||||
t = ((ExecutionException) t).getCause();
|
||||
}
|
||||
if (t instanceof IllegalArgumentException) {
|
||||
assertEquals("It is forbidden to index into the default mapping [_default_]", t.getMessage());
|
||||
} else {
|
||||
throw t;
|
||||
}
|
||||
}
|
||||
|
||||
// 2. already existing index
|
||||
IndexService indexService = createIndex("index2");
|
||||
try {
|
||||
client().prepareIndex("index2", MapperService.DEFAULT_MAPPING, "2").setSource().execute().get();
|
||||
fail();
|
||||
} catch (Throwable t) {
|
||||
if (t instanceof ExecutionException) {
|
||||
t = ((ExecutionException) t).getCause();
|
||||
}
|
||||
if (t instanceof IllegalArgumentException) {
|
||||
assertEquals("It is forbidden to index into the default mapping [_default_]", t.getMessage());
|
||||
} else {
|
||||
throw t;
|
||||
}
|
||||
}
|
||||
assertFalse(indexService.mapperService().hasMapping(MapperService.DEFAULT_MAPPING));
|
||||
}
|
||||
}
|
||||
@ -31,34 +31,24 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
|||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.DocumentMapperParser;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.*;
|
||||
import org.elasticsearch.index.mapper.ParseContext.Document;
|
||||
import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.DateFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.TokenCountFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.*;
|
||||
import org.elasticsearch.index.mapper.geo.BaseGeoPointFieldMapper;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.doc;
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.rootObject;
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
|
||||
import static org.elasticsearch.index.mapper.MapperBuilders.*;
|
||||
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
|
||||
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.*;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -526,4 +516,30 @@ public class MultiFieldTests extends ESSingleNodeTestCase {
|
|||
assertTrue(e.getMessage().contains("cannot be used in multi field"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testMultiFieldWithDot() throws IOException {
|
||||
XContentBuilder mapping = jsonBuilder();
|
||||
mapping.startObject()
|
||||
.startObject("my_type")
|
||||
.startObject("properties")
|
||||
.startObject("city")
|
||||
.field("type", "string")
|
||||
.startObject("fields")
|
||||
.startObject("raw.foo")
|
||||
.field("type", "string")
|
||||
.field("index", "not_analyzed")
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject();
|
||||
|
||||
MapperService mapperService = createIndex("test").mapperService();
|
||||
try {
|
||||
mapperService.documentMapperParser().parse(mapping.string());
|
||||
fail("this should throw an exception because one field contains a dot");
|
||||
} catch (MapperParsingException e) {
|
||||
assertThat(e.getMessage(), equalTo("Field name [raw.foo] which is a multi field of [city] cannot contain '.'"));
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -29,7 +29,9 @@ import org.elasticsearch.common.xcontent.XContentFactory;
|
|||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.MergeMappingException;
|
||||
import org.elasticsearch.index.mapper.MergeResult;
|
||||
import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -107,6 +109,100 @@ public class UpdateMappingTests extends ESSingleNodeTestCase {
|
|||
assertThat(mappingAfterUpdate, equalTo(mappingBeforeUpdate));
|
||||
}
|
||||
|
||||
public void testConflictSameType() throws Exception {
|
||||
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("foo").field("type", "long").endObject()
|
||||
.endObject().endObject().endObject();
|
||||
MapperService mapperService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping).mapperService();
|
||||
|
||||
XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type")
|
||||
.startObject("properties").startObject("foo").field("type", "double").endObject()
|
||||
.endObject().endObject().endObject();
|
||||
|
||||
try {
|
||||
mapperService.merge("type", new CompressedXContent(update.string()), false, false);
|
||||
fail();
|
||||
} catch (MergeMappingException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
try {
|
||||
mapperService.merge("type", new CompressedXContent(update.string()), false, false);
|
||||
fail();
|
||||
} catch (MergeMappingException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
assertTrue(mapperService.documentMapper("type").mapping().root().getMapper("foo") instanceof LongFieldMapper);
|
||||
}
|
||||
|
||||
public void testConflictNewType() throws Exception {
|
||||
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
|
||||
.startObject("properties").startObject("foo").field("type", "long").endObject()
|
||||
.endObject().endObject().endObject();
|
||||
MapperService mapperService = createIndex("test", Settings.settingsBuilder().build(), "type1", mapping).mapperService();
|
||||
|
||||
XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type2")
|
||||
.startObject("properties").startObject("foo").field("type", "double").endObject()
|
||||
.endObject().endObject().endObject();
|
||||
|
||||
try {
|
||||
mapperService.merge("type2", new CompressedXContent(update.string()), false, false);
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
// expected
|
||||
assertTrue(e.getMessage().contains("conflicts with existing mapping in other types"));
|
||||
}
|
||||
|
||||
try {
|
||||
mapperService.merge("type2", new CompressedXContent(update.string()), false, false);
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
// expected
|
||||
assertTrue(e.getMessage().contains("conflicts with existing mapping in other types"));
|
||||
}
|
||||
|
||||
assertTrue(mapperService.documentMapper("type1").mapping().root().getMapper("foo") instanceof LongFieldMapper);
|
||||
assertNull(mapperService.documentMapper("type2"));
|
||||
}
|
||||
|
||||
// same as the testConflictNewType except that the mapping update is on an existing type
|
||||
@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/15049")
|
||||
public void testConflictNewTypeUpdate() throws Exception {
|
||||
XContentBuilder mapping1 = XContentFactory.jsonBuilder().startObject().startObject("type1")
|
||||
.startObject("properties").startObject("foo").field("type", "long").endObject()
|
||||
.endObject().endObject().endObject();
|
||||
XContentBuilder mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject();
|
||||
MapperService mapperService = createIndex("test", Settings.settingsBuilder().build()).mapperService();
|
||||
|
||||
mapperService.merge("type1", new CompressedXContent(mapping1.string()), false, false);
|
||||
mapperService.merge("type2", new CompressedXContent(mapping2.string()), false, false);
|
||||
|
||||
XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type2")
|
||||
.startObject("properties").startObject("foo").field("type", "double").endObject()
|
||||
.endObject().endObject().endObject();
|
||||
|
||||
try {
|
||||
mapperService.merge("type2", new CompressedXContent(update.string()), false, false);
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
// expected
|
||||
assertTrue(e.getMessage().contains("conflicts with existing mapping in other types"));
|
||||
}
|
||||
|
||||
try {
|
||||
mapperService.merge("type2", new CompressedXContent(update.string()), false, false);
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
// expected
|
||||
assertTrue(e.getMessage().contains("conflicts with existing mapping in other types"));
|
||||
}
|
||||
|
||||
assertTrue(mapperService.documentMapper("type1").mapping().root().getMapper("foo") instanceof LongFieldMapper);
|
||||
assertNotNull(mapperService.documentMapper("type2"));
|
||||
assertNull(mapperService.documentMapper("type2").mapping().root().getMapper("foo"));
|
||||
}
|
||||
|
||||
public void testIndexFieldParsingBackcompat() throws IOException {
|
||||
IndexService indexService = createIndex("test", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build());
|
||||
XContentBuilder indexMapping = XContentFactory.jsonBuilder();
|
||||
@ -33,9 +33,7 @@ import org.elasticsearch.test.geo.RandomShapeGenerator;
|
|||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.hamcrest.Matchers.closeTo;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
import static org.hamcrest.Matchers.*;
|
||||
|
||||
public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDistanceQueryBuilder> {
|
||||
|
||||
|
@ -86,7 +84,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
|
|||
}
|
||||
fail("must not be null or empty");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
// expected
|
||||
assertThat(ex.getMessage(), equalTo("fieldName must not be null or empty"));
|
||||
}
|
||||
|
||||
GeoDistanceQueryBuilder query = new GeoDistanceQueryBuilder("fieldName");
|
||||
|
@ -98,7 +96,7 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
|
|||
}
|
||||
fail("must not be null or empty");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
// expected
|
||||
assertThat(ex.getMessage(), equalTo("distance must not be null or empty"));
|
||||
}
|
||||
|
||||
try {
|
||||
|
@ -107,44 +105,52 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
|
|||
} else {
|
||||
query.distance(null, DistanceUnit.DEFAULT);
|
||||
}
|
||||
fail("must not be null or empty");
|
||||
fail("distance must not be null or empty");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
// expected
|
||||
assertThat(ex.getMessage(), equalTo("distance must not be null or empty"));
|
||||
}
|
||||
|
||||
try {
|
||||
query.distance("1", null);
|
||||
fail("unit must not be null");
|
||||
if (randomBoolean()) {
|
||||
query.distance("1", null);
|
||||
} else {
|
||||
query.distance(1, null);
|
||||
}
|
||||
fail("distance must not be null");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
// expected
|
||||
assertThat(ex.getMessage(), equalTo("distance unit must not be null"));
|
||||
}
|
||||
|
||||
try {
|
||||
query.distance(1, null);
|
||||
fail("unit must not be null");
|
||||
query.distance(randomIntBetween(Integer.MIN_VALUE, 0), DistanceUnit.DEFAULT);
|
||||
fail("distance must be greater than zero");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
// expected
|
||||
assertThat(ex.getMessage(), equalTo("distance must be greater than zero"));
|
||||
}
|
||||
|
||||
try {
|
||||
query.geohash(null);
|
||||
if (randomBoolean()) {
|
||||
query.geohash(null);
|
||||
} else {
|
||||
query.geohash("");
|
||||
}
|
||||
fail("geohash must not be null");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
// expected
|
||||
assertThat(ex.getMessage(), equalTo("geohash must not be null or empty"));
|
||||
}
|
||||
|
||||
try {
|
||||
query.geoDistance(null);
|
||||
fail("geodistance must not be null");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
// expected
|
||||
assertThat(ex.getMessage(), equalTo("geoDistance must not be null"));
|
||||
}
|
||||
|
||||
try {
|
||||
query.optimizeBbox(null);
|
||||
fail("optimizeBbox must not be null");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
// expected
|
||||
assertThat(ex.getMessage(), equalTo("optimizeBbox must not be null"));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -387,15 +393,15 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
|
|||
|
||||
public void testFromJson() throws IOException {
|
||||
String json =
|
||||
"{\n" +
|
||||
" \"geo_distance\" : {\n" +
|
||||
" \"pin.location\" : [ -70.0, 40.0 ],\n" +
|
||||
" \"distance\" : 12000.0,\n" +
|
||||
" \"distance_type\" : \"sloppy_arc\",\n" +
|
||||
" \"optimize_bbox\" : \"memory\",\n" +
|
||||
" \"validation_method\" : \"STRICT\",\n" +
|
||||
" \"boost\" : 1.0\n" +
|
||||
" }\n" +
|
||||
"{\n" +
|
||||
" \"geo_distance\" : {\n" +
|
||||
" \"pin.location\" : [ -70.0, 40.0 ],\n" +
|
||||
" \"distance\" : 12000.0,\n" +
|
||||
" \"distance_type\" : \"sloppy_arc\",\n" +
|
||||
" \"optimize_bbox\" : \"memory\",\n" +
|
||||
" \"validation_method\" : \"STRICT\",\n" +
|
||||
" \"boost\" : 1.0\n" +
|
||||
" }\n" +
|
||||
"}";
|
||||
GeoDistanceQueryBuilder parsed = (GeoDistanceQueryBuilder) parseQuery(json);
|
||||
checkGeneratedJson(json, parsed);
|
||||
@ -22,8 +22,8 @@ package org.elasticsearch.indices.mapping;
|
|||
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
|
||||
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.metadata.MappingMetaData;
|
||||
import org.elasticsearch.common.Priority;
|
||||
|
@ -43,22 +43,14 @@ import java.util.Map;
|
|||
import java.util.concurrent.CyclicBarrier;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_METADATA;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
|
||||
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasEntry;
|
||||
import static org.hamcrest.Matchers.hasKey;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
|
||||
import static org.hamcrest.Matchers.*;
|
||||
|
||||
@ClusterScope(randomDynamicTemplates = false)
|
||||
public class UpdateMappingIntegrationIT extends ESIntegTestCase {
|
||||
|
||||
public void testDynamicUpdates() throws Exception {
|
||||
client().admin().indices().prepareCreate("test")
|
||||
.setSettings(
|
||||
@ -161,6 +161,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
@TestLogging("cluster.service:TRACE")
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/14932")
|
||||
public void testDeleteCreateInOneBulk() throws Exception {
|
||||
internalCluster().startNodesAsync(2, Settings.builder()
|
||||
.put(DiscoveryModule.DISCOVERY_TYPE_KEY, "zen")
|
||||
@ -27,13 +27,7 @@ import org.elasticsearch.cluster.ClusterState;
|
|||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.RoutingNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.cluster.routing.TestShardRouting;
|
||||
import org.elasticsearch.cluster.routing.*;
|
||||
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
|
||||
|
@ -56,11 +50,7 @@ import org.elasticsearch.test.disruption.BlockClusterStateProcessing;
|
|||
import org.elasticsearch.test.disruption.SingleNodeDisruption;
|
||||
import org.elasticsearch.test.junit.annotations.TestLogging;
|
||||
import org.elasticsearch.test.transport.MockTransportService;
|
||||
import org.elasticsearch.transport.ConnectTransportException;
|
||||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
import org.elasticsearch.transport.TransportRequestOptions;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.elasticsearch.transport.*;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
|
@ -407,7 +397,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
|
|||
// disable relocations when we do this, to make sure the shards are not relocated from node2
|
||||
// due to rebalancing, and delete its content
|
||||
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get();
|
||||
internalCluster().getInstance(ClusterService.class, nonMasterNode).submitStateUpdateTask("test", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
|
||||
internalCluster().getInstance(ClusterService.class, nonMasterNode).submitStateUpdateTask("test", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder("test");
|
||||
@ -118,7 +118,8 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
|
|||
|
||||
assertHitCount(searchResponse, 1);
|
||||
assertThat(searchResponse.getHits().getAt(0).field("field1").value().toString(), equalTo("value1"));
|
||||
assertThat(searchResponse.getHits().getAt(0).field("field2").value().toString(), equalTo("value 2")); // this will still be loaded because of the source feature
|
||||
// field2 is not stored.
|
||||
assertThat(searchResponse.getHits().getAt(0).field("field2"), nullValue());
|
||||
|
||||
client().prepareIndex("text_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet();
|
||||
|
||||
@ -154,7 +154,7 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase {
|
|||
.endObject()));
|
||||
}
|
||||
assertAcked(prepareCreate(HIGH_CARD_IDX_NAME).setSettings(Settings.builder().put("number_of_shards", 2))
|
||||
.addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long", "tag", "type=string,index=not_analyzed"));
|
||||
.addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=geo_point", MULTI_VALUED_FIELD_NAME, "type=geo_point", NUMBER_FIELD_NAME, "type=long,store=true", "tag", "type=string,index=not_analyzed"));
|
||||
|
||||
for (int i = 0; i < 2000; i++) {
|
||||
singleVal = singleValues[i % numUniqueGeoPoints];
|
||||
|
@ -196,8 +196,8 @@ public abstract class AbstractGeoTestCase extends ESIntegTestCase {
|
|||
SearchHitField hitField = searchHit.field(NUMBER_FIELD_NAME);
|
||||
|
||||
assertThat("Hit " + i + " has wrong number of values", hitField.getValues().size(), equalTo(1));
|
||||
Integer value = hitField.getValue();
|
||||
assertThat("Hit " + i + " has wrong value", value, equalTo(i));
|
||||
Long value = hitField.getValue();
|
||||
assertThat("Hit " + i + " has wrong value", value.intValue(), equalTo(i));
|
||||
}
|
||||
assertThat(totalHits, equalTo(2000l));
|
||||
}
|
||||
@ -532,8 +532,8 @@ public class TopHitsIT extends ESIntegTestCase {
|
|||
topHits("hits").setSize(1)
|
||||
.highlighter(new HighlightBuilder().field("text"))
|
||||
.setExplain(true)
|
||||
.addFieldDataField("field1")
|
||||
.addField("text")
|
||||
.addFieldDataField("field1")
|
||||
.addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap()))
|
||||
.setFetchSource("text", null)
|
||||
.setVersion(true)
|
||||
|
@ -569,8 +569,7 @@ public class TopHitsIT extends ESIntegTestCase {
|
|||
SearchHitField field = hit.field("field1");
|
||||
assertThat(field.getValue().toString(), equalTo("5"));
|
||||
|
||||
field = hit.field("text");
|
||||
assertThat(field.getValue().toString(), equalTo("some text to entertain"));
|
||||
assertThat(hit.getSource().get("text").toString(), equalTo("some text to entertain"));
|
||||
|
||||
field = hit.field("script");
|
||||
assertThat(field.getValue().toString(), equalTo("5"));
|
||||
@ -0,0 +1,332 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.highlight;
|
||||
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.index.query.IdsQueryBuilder;
|
||||
import org.elasticsearch.index.query.MatchAllQueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.index.query.TermQueryBuilder;
|
||||
import org.elasticsearch.search.highlight.HighlightBuilder.Field;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
|
||||
public class HighlightBuilderTests extends ESTestCase {
|
||||
|
||||
private static final int NUMBER_OF_TESTBUILDERS = 20;
|
||||
private static NamedWriteableRegistry namedWriteableRegistry;
|
||||
|
||||
/**
|
||||
* setup for the whole base test class
|
||||
*/
|
||||
@BeforeClass
|
||||
public static void init() {
|
||||
if (namedWriteableRegistry == null) {
|
||||
namedWriteableRegistry = new NamedWriteableRegistry();
|
||||
namedWriteableRegistry.registerPrototype(QueryBuilder.class, new MatchAllQueryBuilder());
|
||||
namedWriteableRegistry.registerPrototype(QueryBuilder.class, new IdsQueryBuilder());
|
||||
namedWriteableRegistry.registerPrototype(QueryBuilder.class, new TermQueryBuilder("field", "value"));
|
||||
}
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void afterClass() throws Exception {
|
||||
namedWriteableRegistry = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Test serialization and deserialization of the highlighter builder
|
||||
*/
|
||||
public void testSerialization() throws IOException {
|
||||
for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
|
||||
HighlightBuilder original = randomHighlighterBuilder();
|
||||
HighlightBuilder deserialized = serializedCopy(original);
|
||||
assertEquals(deserialized, original);
|
||||
assertEquals(deserialized.hashCode(), original.hashCode());
|
||||
assertNotSame(deserialized, original);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test equality and hashCode properties
|
||||
*/
|
||||
public void testEqualsAndHashcode() throws IOException {
|
||||
for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
|
||||
HighlightBuilder firstBuilder = randomHighlighterBuilder();
|
||||
assertFalse("highlighter is equal to null", firstBuilder.equals(null));
|
||||
assertFalse("highlighter is equal to incompatible type", firstBuilder.equals(""));
|
||||
assertTrue("highlighter is not equal to self", firstBuilder.equals(firstBuilder));
|
||||
assertThat("same highlighter's hashcode returns different values if called multiple times", firstBuilder.hashCode(),
|
||||
equalTo(firstBuilder.hashCode()));
|
||||
assertThat("different highlighters should not be equal", mutate(firstBuilder), not(equalTo(firstBuilder)));
|
||||
|
||||
HighlightBuilder secondBuilder = serializedCopy(firstBuilder);
|
||||
assertTrue("highlighter is not equal to self", secondBuilder.equals(secondBuilder));
|
||||
assertTrue("highlighter is not equal to its copy", firstBuilder.equals(secondBuilder));
|
||||
assertTrue("equals is not symmetric", secondBuilder.equals(firstBuilder));
|
||||
assertThat("highlighter copy's hashcode is different from original hashcode", secondBuilder.hashCode(), equalTo(firstBuilder.hashCode()));
|
||||
|
||||
HighlightBuilder thirdBuilder = serializedCopy(secondBuilder);
|
||||
assertTrue("highlighter is not equal to self", thirdBuilder.equals(thirdBuilder));
|
||||
assertTrue("highlighter is not equal to its copy", secondBuilder.equals(thirdBuilder));
|
||||
assertThat("highlighter copy's hashcode is different from original hashcode", secondBuilder.hashCode(), equalTo(thirdBuilder.hashCode()));
|
||||
assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder));
|
||||
assertThat("highlighter copy's hashcode is different from original hashcode", firstBuilder.hashCode(), equalTo(thirdBuilder.hashCode()));
|
||||
assertTrue("equals is not symmetric", thirdBuilder.equals(secondBuilder));
|
||||
assertTrue("equals is not symmetric", thirdBuilder.equals(firstBuilder));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* create a random highlighter builder to put under test
|
||||
*/
|
||||
private static HighlightBuilder randomHighlighterBuilder() {
|
||||
HighlightBuilder testHighlighter = new HighlightBuilder();
|
||||
setRandomCommonOptions(testHighlighter);
|
||||
testHighlighter.useExplicitFieldOrder(randomBoolean());
|
||||
if (randomBoolean()) {
|
||||
testHighlighter.encoder(randomFrom(Arrays.asList(new String[]{"default", "html"})));
|
||||
}
|
||||
int numberOfFields = randomIntBetween(1,5);
|
||||
for (int i = 0; i < numberOfFields; i++) {
|
||||
Field field = new Field(randomAsciiOfLengthBetween(1, 10));
|
||||
setRandomCommonOptions(field);
|
||||
if (randomBoolean()) {
|
||||
field.fragmentOffset(randomIntBetween(1, 100));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
field.matchedFields(randomStringArray(0, 4));
|
||||
}
|
||||
testHighlighter.field(field);
|
||||
}
|
||||
return testHighlighter;
|
||||
}
|
||||
|
||||
private static void setRandomCommonOptions(AbstractHighlighterBuilder highlightBuilder) {
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.preTags(randomStringArray(0, 3));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.postTags(randomStringArray(0, 3));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.fragmentSize(randomIntBetween(0, 100));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.numOfFragments(randomIntBetween(0, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.highlighterType(randomAsciiOfLengthBetween(1, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.fragmenter(randomAsciiOfLengthBetween(1, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
QueryBuilder highlightQuery;
|
||||
switch (randomInt(2)) {
|
||||
case 0:
|
||||
highlightQuery = new MatchAllQueryBuilder();
|
||||
break;
|
||||
case 1:
|
||||
highlightQuery = new IdsQueryBuilder();
|
||||
break;
|
||||
default:
|
||||
case 2:
|
||||
highlightQuery = new TermQueryBuilder(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
|
||||
break;
|
||||
}
|
||||
highlightQuery.boost((float) randomDoubleBetween(0, 10, false));
|
||||
highlightBuilder.highlightQuery(highlightQuery);
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.order(randomAsciiOfLengthBetween(1, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.highlightFilter(randomBoolean());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.forceSource(randomBoolean());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.boundaryMaxScan(randomIntBetween(0, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.boundaryChars(randomAsciiOfLengthBetween(1, 10).toCharArray());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.noMatchSize(randomIntBetween(0, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.phraseLimit(randomIntBetween(0, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
int items = randomIntBetween(0, 5);
|
||||
Map<String, Object> options = new HashMap<String, Object>(items);
|
||||
for (int i = 0; i < items; i++) {
|
||||
Object value = null;
|
||||
switch (randomInt(2)) {
|
||||
case 0:
|
||||
value = randomAsciiOfLengthBetween(1, 10);
|
||||
break;
|
||||
case 1:
|
||||
value = new Integer(randomInt(1000));
|
||||
break;
|
||||
case 2:
|
||||
value = new Boolean(randomBoolean());
|
||||
break;
|
||||
}
|
||||
options.put(randomAsciiOfLengthBetween(1, 10), value);
|
||||
}
highlightBuilder.options(options);
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
highlightBuilder.requireFieldMatch(randomBoolean());
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private static void mutateCommonOptions(AbstractHighlighterBuilder highlightBuilder) {
|
||||
switch (randomIntBetween(1, 16)) {
|
||||
case 1:
|
||||
highlightBuilder.preTags(randomStringArray(4, 6));
|
||||
break;
|
||||
case 2:
|
||||
highlightBuilder.postTags(randomStringArray(4, 6));
|
||||
break;
|
||||
case 3:
|
||||
highlightBuilder.fragmentSize(randomIntBetween(101, 200));
|
||||
break;
|
||||
case 4:
|
||||
highlightBuilder.numOfFragments(randomIntBetween(11, 20));
|
||||
break;
|
||||
case 5:
|
||||
highlightBuilder.highlighterType(randomAsciiOfLengthBetween(11, 20));
|
||||
break;
|
||||
case 6:
|
||||
highlightBuilder.fragmenter(randomAsciiOfLengthBetween(11, 20));
|
||||
break;
|
||||
case 7:
|
||||
highlightBuilder.highlightQuery(new TermQueryBuilder(randomAsciiOfLengthBetween(11, 20), randomAsciiOfLengthBetween(11, 20)));
|
||||
break;
|
||||
case 8:
|
||||
highlightBuilder.order(randomAsciiOfLengthBetween(11, 20));
|
||||
break;
|
||||
case 9:
|
||||
highlightBuilder.highlightFilter(toggleOrSet(highlightBuilder.highlightFilter()));
break;
|
||||
case 10:
|
||||
highlightBuilder.forceSource(toggleOrSet(highlightBuilder.forceSource()));
|
||||
break;
|
||||
case 11:
|
||||
highlightBuilder.boundaryMaxScan(randomIntBetween(11, 20));
|
||||
break;
|
||||
case 12:
|
||||
highlightBuilder.boundaryChars(randomAsciiOfLengthBetween(11, 20).toCharArray());
|
||||
break;
|
||||
case 13:
|
||||
highlightBuilder.noMatchSize(randomIntBetween(11, 20));
|
||||
break;
|
||||
case 14:
|
||||
highlightBuilder.phraseLimit(randomIntBetween(11, 20));
|
||||
break;
|
||||
case 15:
|
||||
int items = 6;
|
||||
Map<String, Object> options = new HashMap<String, Object>(items);
|
||||
for (int i = 0; i < items; i++) {
|
||||
options.put(randomAsciiOfLengthBetween(1, 10), randomAsciiOfLengthBetween(1, 10));
|
||||
}
|
||||
highlightBuilder.options(options);
|
||||
break;
|
||||
case 16:
|
||||
highlightBuilder.requireFieldMatch(toggleOrSet(highlightBuilder.requireFieldMatch()));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private static Boolean toggleOrSet(Boolean flag) {
|
||||
if (flag == null) {
|
||||
return randomBoolean();
|
||||
} else {
|
||||
return !flag.booleanValue();
|
||||
}
|
||||
}
|
||||
|
||||
private static String[] randomStringArray(int minSize, int maxSize) {
|
||||
int size = randomIntBetween(minSize, maxSize);
|
||||
String[] randomStrings = new String[size];
|
||||
for (int f = 0; f < size; f++) {
|
||||
randomStrings[f] = randomAsciiOfLengthBetween(1, 10);
|
||||
}
|
||||
return randomStrings;
|
||||
}
|
||||
|
||||
/**
|
||||
* mutate the given highlighter builder so the returned one is different in one aspect
|
||||
*/
|
||||
private static HighlightBuilder mutate(HighlightBuilder original) throws IOException {
|
||||
HighlightBuilder mutation = serializedCopy(original);
|
||||
if (randomBoolean()) {
|
||||
mutateCommonOptions(mutation);
|
||||
} else {
|
||||
switch (randomIntBetween(0, 2)) {
|
||||
// change settings that only exists on top level
|
||||
case 0:
|
||||
mutation.useExplicitFieldOrder(!original.useExplicitFieldOrder()); break;
|
||||
case 1:
|
||||
mutation.encoder(original.encoder() + randomAsciiOfLength(2)); break;
|
||||
case 2:
|
||||
if (randomBoolean()) {
|
||||
// add another field
|
||||
mutation.field(new Field(randomAsciiOfLength(10)));
|
||||
} else {
|
||||
// change existing fields
|
||||
List<Field> originalFields = original.fields();
|
||||
Field fieldToChange = originalFields.get(randomInt(originalFields.size() - 1));
|
||||
if (randomBoolean()) {
|
||||
fieldToChange.fragmentOffset(randomIntBetween(101, 200));
|
||||
} else {
|
||||
fieldToChange.matchedFields(randomStringArray(5, 10));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return mutation;
|
||||
}
|
||||
|
||||
private static HighlightBuilder serializedCopy(HighlightBuilder original) throws IOException {
|
||||
try (BytesStreamOutput output = new BytesStreamOutput()) {
|
||||
original.writeTo(output);
|
||||
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
|
||||
return HighlightBuilder.PROTOTYPE.readFrom(in);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
@ -333,9 +333,9 @@ public class SimpleSearchIT extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
public void testQueryNumericFieldWithRegex() throws Exception {
|
||||
createIndex("idx");
|
||||
indexRandom(true, client().prepareIndex("idx", "type").setSource("num", 34));
|
||||
|
||||
assertAcked(prepareCreate("idx").addMapping("type", "num", "type=integer"));
|
||||
ensureGreen("idx");
|
||||
|
||||
try {
|
||||
client().prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", "34")).get();
|
||||
fail("SearchPhaseExecutionException should have been thrown");
|
||||
@ -20,12 +20,7 @@ package org.elasticsearch.snapshots;
|
|||
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
|
||||
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.SnapshotsInProgress;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import org.elasticsearch.cluster.metadata.SnapshotId;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.cluster.service.PendingClusterTask;
|
||||
|
@ -208,7 +203,7 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
|
|||
|
||||
private void addBlock() {
|
||||
// We should block after this task - add blocking cluster state update task
|
||||
clusterService.submitStateUpdateTask("test_block", passThroughPriority, new ClusterStateUpdateTask() {
|
||||
clusterService.submitStateUpdateTask("test_block", new ClusterStateUpdateTask(passThroughPriority) {
|
||||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
while(System.currentTimeMillis() < stopWaitingAt) {
|
||||
@ -50,7 +50,11 @@ import static org.hamcrest.Matchers.is;
|
|||
*/
|
||||
public class NettySizeHeaderFrameDecoderTests extends ESTestCase {
|
||||
|
||||
private final Settings settings = settingsBuilder().put("name", "foo").put("transport.host", "127.0.0.1").build();
|
||||
private final Settings settings = settingsBuilder()
|
||||
.put("name", "foo")
|
||||
.put("transport.host", "127.0.0.1")
|
||||
.put("transport.tcp.port", "0")
|
||||
.build();
|
||||
|
||||
private ThreadPool threadPool;
|
||||
private NettyTransport nettyTransport;
|
||||
@ -49,9 +49,7 @@ public class NettyScheduledPingTests extends ESTestCase {
|
|||
public void testScheduledPing() throws Exception {
|
||||
ThreadPool threadPool = new ThreadPool(getClass().getName());
|
||||
|
||||
int startPort = 11000 + randomIntBetween(0, 255);
|
||||
int endPort = startPort + 10;
|
||||
Settings settings = Settings.builder().put(NettyTransport.PING_SCHEDULE, "5ms").put("transport.tcp.port", startPort + "-" + endPort).build();
|
||||
Settings settings = Settings.builder().put(NettyTransport.PING_SCHEDULE, "5ms").put("transport.tcp.port", 0).build();
|
||||
|
||||
final NettyTransport nettyA = new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT, new NamedWriteableRegistry());
|
||||
MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool);
|
||||
@ -18,8 +18,6 @@
|
|||
*/
|
||||
package org.elasticsearch.transport.netty;
|
||||
|
||||
import com.carrotsearch.hppc.IntHashSet;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cache.recycler.PageCacheRecycler;
|
||||
import org.elasticsearch.common.component.Lifecycle;
|
||||
|
@ -27,176 +25,115 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
|||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.network.NetworkUtils;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.InetSocketTransportAddress;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.junit.rule.RepeatOnExceptionRule;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.BindTransportException;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.junit.Before;
|
||||
import org.junit.Rule;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.ServerSocket;
|
||||
import java.net.Socket;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class NettyTransportMultiPortTests extends ESTestCase {
|
||||
|
||||
private static final int MAX_RETRIES = 10;
|
||||
private String host;
|
||||
|
||||
@Rule
|
||||
public RepeatOnExceptionRule repeatOnBindExceptionRule = new RepeatOnExceptionRule(logger, MAX_RETRIES, BindTransportException.class);
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
if (randomBoolean()) {
|
||||
host = "localhost";
|
||||
if (NetworkUtils.SUPPORTS_V6 && randomBoolean()) {
|
||||
host = "::1";
|
||||
} else {
|
||||
if (NetworkUtils.SUPPORTS_V6 && randomBoolean()) {
|
||||
host = "::1";
|
||||
} else {
|
||||
host = "127.0.0.1";
|
||||
}
|
||||
host = "127.0.0.1";
|
||||
}
|
||||
}
|
||||
|
||||
public void testThatNettyCanBindToMultiplePorts() throws Exception {
|
||||
int[] ports = getRandomPorts(3);
|
||||
|
||||
Settings settings = settingsBuilder()
|
||||
.put("network.host", host)
|
||||
.put("transport.tcp.port", ports[0])
|
||||
.put("transport.profiles.default.port", ports[1])
|
||||
.put("transport.profiles.client1.port", ports[2])
|
||||
.put("transport.tcp.port", 22) // will not actually bind to this
|
||||
.put("transport.profiles.default.port", 0)
|
||||
.put("transport.profiles.client1.port", 0)
|
||||
.build();
|
||||
|
||||
ThreadPool threadPool = new ThreadPool("tst");
|
||||
try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
|
||||
assertConnectionRefused(ports[0]);
|
||||
assertPortIsBound(ports[1]);
|
||||
assertPortIsBound(ports[2]);
|
||||
try (NettyTransport transport = startNettyTransport(settings, threadPool)) {
|
||||
assertEquals(1, transport.profileBoundAddresses().size());
|
||||
assertEquals(1, transport.boundAddress().boundAddresses().length);
|
||||
} finally {
|
||||
terminate(threadPool);
|
||||
}
|
||||
}
|
||||
|
||||
public void testThatDefaultProfileInheritsFromStandardSettings() throws Exception {
|
||||
int[] ports = getRandomPorts(2);
|
||||
|
||||
Settings settings = settingsBuilder()
|
||||
.put("network.host", host)
|
||||
.put("transport.tcp.port", ports[0])
|
||||
.put("transport.profiles.client1.port", ports[1])
|
||||
.put("transport.tcp.port", 0)
|
||||
.put("transport.profiles.client1.port", 0)
|
||||
.build();
|
||||
|
||||
ThreadPool threadPool = new ThreadPool("tst");
|
||||
try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
|
||||
assertPortIsBound(ports[0]);
|
||||
assertPortIsBound(ports[1]);
|
||||
try (NettyTransport transport = startNettyTransport(settings, threadPool)) {
|
||||
assertEquals(1, transport.profileBoundAddresses().size());
|
||||
assertEquals(1, transport.boundAddress().boundAddresses().length);
|
||||
} finally {
|
||||
terminate(threadPool);
|
||||
}
|
||||
}
|
||||
|
||||
public void testThatProfileWithoutPortSettingsFails() throws Exception {
|
||||
int[] ports = getRandomPorts(1);
|
||||
|
||||
Settings settings = settingsBuilder()
|
||||
.put("network.host", host)
|
||||
.put("transport.tcp.port", ports[0])
|
||||
.put("transport.tcp.port", 0)
|
||||
.put("transport.profiles.client1.whatever", "foo")
|
||||
.build();
|
||||
|
||||
ThreadPool threadPool = new ThreadPool("tst");
|
||||
try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
|
||||
assertPortIsBound(ports[0]);
|
||||
try (NettyTransport transport = startNettyTransport(settings, threadPool)) {
|
||||
assertEquals(0, transport.profileBoundAddresses().size());
|
||||
assertEquals(1, transport.boundAddress().boundAddresses().length);
|
||||
} finally {
|
||||
terminate(threadPool);
|
||||
}
|
||||
}
|
||||
|
||||
public void testThatDefaultProfilePortOverridesGeneralConfiguration() throws Exception {
|
||||
int[] ports = getRandomPorts(3);
|
||||
|
||||
Settings settings = settingsBuilder()
|
||||
.put("network.host", host)
|
||||
.put("transport.tcp.port", ports[0])
|
||||
.put("transport.netty.port", ports[1])
|
||||
.put("transport.profiles.default.port", ports[2])
|
||||
.put("transport.tcp.port", 22) // will not actually bind to this
|
||||
.put("transport.netty.port", 23) // will not actually bind to this
|
||||
.put("transport.profiles.default.port", 0)
|
||||
.build();
|
||||
|
||||
ThreadPool threadPool = new ThreadPool("tst");
|
||||
try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
|
||||
assertConnectionRefused(ports[0]);
|
||||
assertConnectionRefused(ports[1]);
|
||||
assertPortIsBound(ports[2]);
|
||||
try (NettyTransport transport = startNettyTransport(settings, threadPool)) {
|
||||
assertEquals(0, transport.profileBoundAddresses().size());
|
||||
assertEquals(1, transport.boundAddress().boundAddresses().length);
|
||||
} finally {
|
||||
terminate(threadPool);
|
||||
}
|
||||
}
|
||||
|
||||
public void testThatProfileWithoutValidNameIsIgnored() throws Exception {
|
||||
int[] ports = getRandomPorts(3);
|
||||
|
||||
Settings settings = settingsBuilder()
|
||||
.put("network.host", host)
|
||||
.put("transport.tcp.port", ports[0])
|
||||
.put("transport.tcp.port", 0)
|
||||
// mimics someone trying to define a profile for .local which is the profile for a node request to itself
|
||||
.put("transport.profiles." + TransportService.DIRECT_RESPONSE_PROFILE + ".port", ports[1])
|
||||
.put("transport.profiles..port", ports[2])
|
||||
.put("transport.profiles." + TransportService.DIRECT_RESPONSE_PROFILE + ".port", 22) // will not actually bind to this
|
||||
.put("transport.profiles..port", 23) // will not actually bind to this
|
||||
.build();
|
||||
|
||||
ThreadPool threadPool = new ThreadPool("tst");
|
||||
try (NettyTransport ignored = startNettyTransport(settings, threadPool)) {
|
||||
assertPortIsBound(ports[0]);
|
||||
assertConnectionRefused(ports[1]);
|
||||
assertConnectionRefused(ports[2]);
|
||||
try (NettyTransport transport = startNettyTransport(settings, threadPool)) {
|
||||
assertEquals(0, transport.profileBoundAddresses().size());
|
||||
assertEquals(1, transport.boundAddress().boundAddresses().length);
|
||||
} finally {
|
||||
terminate(threadPool);
|
||||
}
|
||||
}
|
||||
|
||||
private int[] getRandomPorts(int numberOfPorts) {
|
||||
IntHashSet ports = new IntHashSet();
|
||||
|
||||
int nextPort = randomIntBetween(49152, 65535);
|
||||
for (int i = 0; i < numberOfPorts; i++) {
|
||||
boolean foundPortInRange = false;
|
||||
while (!foundPortInRange) {
|
||||
if (!ports.contains(nextPort)) {
|
||||
logger.debug("looking to see if port [{}]is available", nextPort);
|
||||
try (ServerSocket serverSocket = new ServerSocket()) {
|
||||
// Set SO_REUSEADDR as we may bind here and not be able
|
||||
// to reuse the address immediately without it.
|
||||
serverSocket.setReuseAddress(NetworkUtils.defaultReuseAddress());
|
||||
serverSocket.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), nextPort));
|
||||
|
||||
// bind was a success
|
||||
logger.debug("port [{}] available.", nextPort);
|
||||
foundPortInRange = true;
|
||||
ports.add(nextPort);
|
||||
} catch (IOException e) {
|
||||
// Do nothing
|
||||
logger.debug("port [{}] not available.", e, nextPort);
|
||||
}
|
||||
}
|
||||
nextPort = randomIntBetween(49152, 65535);
|
||||
}
|
||||
}
|
||||
return ports.toArray();
|
||||
}
|
||||
|
||||
private NettyTransport startNettyTransport(Settings settings, ThreadPool threadPool) {
|
||||
BigArrays bigArrays = new MockBigArrays(new PageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService());
|
||||
|
||||
|
@ -206,36 +143,4 @@ public class NettyTransportMultiPortTests extends ESTestCase {
|
|||
assertThat(nettyTransport.lifecycleState(), is(Lifecycle.State.STARTED));
|
||||
return nettyTransport;
|
||||
}
|
||||
|
||||
private void assertConnectionRefused(int port) throws Exception {
|
||||
try {
|
||||
trySocketConnection(new InetSocketTransportAddress(InetAddress.getByName(host), port).address());
|
||||
fail("Expected to get exception when connecting to port " + port);
|
||||
} catch (IOException e) {
|
||||
// expected
|
||||
logger.info("Got expected connection message {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private void assertPortIsBound(int port) throws Exception {
|
||||
assertPortIsBound(host, port);
|
||||
}
|
||||
|
||||
private void assertPortIsBound(String host, int port) throws Exception {
|
||||
logger.info("Trying to connect to [{}]:[{}]", host, port);
|
||||
trySocketConnection(new InetSocketTransportAddress(InetAddress.getByName(host), port).address());
|
||||
}
|
||||
|
||||
private void trySocketConnection(InetSocketAddress address) throws Exception {
|
||||
try (Socket socket = new Socket()) {
|
||||
logger.info("Connecting to {}", address);
|
||||
socket.connect(address, 500);
|
||||
|
||||
assertThat(socket.isConnected(), is(true));
|
||||
try (OutputStream os = socket.getOutputStream()) {
|
||||
os.write("foo".getBytes(StandardCharsets.UTF_8));
|
||||
os.flush();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -38,9 +38,7 @@ import static org.hamcrest.Matchers.containsString;
|
|||
public class SimpleNettyTransportTests extends AbstractSimpleTransportTestCase {
|
||||
@Override
|
||||
protected MockTransportService build(Settings settings, Version version, NamedWriteableRegistry namedWriteableRegistry) {
|
||||
int startPort = 11000 + randomIntBetween(0, 255);
|
||||
int endPort = startPort + 10;
|
||||
settings = Settings.builder().put(settings).put("transport.tcp.port", startPort + "-" + endPort).build();
|
||||
settings = Settings.builder().put(settings).put("transport.tcp.port", "0").build();
|
||||
MockTransportService transportService = new MockTransportService(settings, new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, version, namedWriteableRegistry), threadPool);
|
||||
transportService.start();
|
||||
return transportService;
|
||||
@ -20,6 +20,7 @@
import org.apache.tools.ant.filters.FixCrLfFilter
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.precommit.DependencyLicensesTask
import org.elasticsearch.gradle.precommit.UpdateShasTask
import org.elasticsearch.gradle.test.RunTask
import org.elasticsearch.gradle.EmptyDirTask
import org.elasticsearch.gradle.MavenFilteringHack

@ -293,13 +294,16 @@ configure(subprojects.findAll { it.name == 'deb' || it.name == 'rpm' }) {

// TODO: dependency checks should really be when building the jar itself, which would remove the need
// for this hackery and instead we can do this inside the BuildPlugin
task check(group: 'Verification', description: 'Runs all checks.') {} // dummy task!
DependencyLicensesTask.configure(project) {
task dependencyLicenses(type: DependencyLicensesTask) {
dependsOn = [dependencyFiles]
dependencies = dependencyFiles
mapping from: /lucene-.*/, to: 'lucene'
mapping from: /jackson-.*/, to: 'jackson'
}
task check(group: 'Verification', description: 'Runs all checks.', dependsOn: dependencyLicenses) {} // dummy task!
task updateShas(type: UpdateShasTask) {
parentTask = dependencyLicenses
}

RunTask.configure(project)

@ -144,9 +144,6 @@ Also see the {client}/php-api/current/index.html[official Elasticsearch PHP clie

Also see the {client}/python-api/current/index.html[official Elasticsearch Python client].

* http://github.com/elasticsearch/elasticsearch-dsl-py[elasticsearch-dsl-py]
chainable query and filter construction built on top of official client.

* http://github.com/rhec/pyelasticsearch[pyelasticsearch]:
Python client.

@ -34,6 +34,62 @@ For example, you can define the latest version in your `pom.xml` file:
</dependency>
--------------------------------------------------

== Dealing with JAR dependency conflicts

If you want to use Elasticsearch in your Java application, you may have to deal with version conflicts with third party
dependencies like Guava and Joda. For instance, perhaps Elasticsearch uses Joda 2.8, while your code uses Joda 2.1.

You have two choices:

* The simplest solution is to upgrade. Newer module versions are likely to have fixed old bugs.
The further behind you fall, the harder it will be to upgrade later. Of course, it is possible that you are using a
third party dependency that in turn depends on an outdated version of a package, which prevents you from upgrading.

* The second option is to relocate the troublesome dependencies and to shade them either with your own application
or with Elasticsearch and any plugins needed by the Elasticsearch client.

The https://www.elastic.co/blog/to-shade-or-not-to-shade["To shade or not to shade" blog post] describes
all the steps for doing so.

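As a rough illustration of what relocating a dependency can look like, a `maven-shade-plugin` `<relocations>` block
along these lines rewrites Joda references into your own namespace (the `shaded.org.joda` package name is an
arbitrary example, not something prescribed by Elasticsearch):

[source,xml]
--------------------------------------------------
<configuration>
    <relocations>
        <relocation>
            <!-- rewrite org.joda.* references to a private, shaded copy -->
            <pattern>org.joda</pattern>
            <shadedPattern>shaded.org.joda</shadedPattern>
        </relocation>
    </relocations>
</configuration>
--------------------------------------------------
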
== Embedding jar with dependencies

If you want to create a single jar containing your application and all dependencies, you should not
use `maven-assembly-plugin` for that because it cannot deal with the `META-INF/services` structure
required by Lucene jars.

Instead, you can use `maven-shade-plugin` and configure it as follows:

[source,xml]
--------------------------------------------------
<plugin>
    <groupId>org.apache.maven.plugins</groupId>
    <artifactId>maven-shade-plugin</artifactId>
    <version>2.4.1</version>
    <executions>
        <execution>
            <phase>package</phase>
            <goals><goal>shade</goal></goals>
            <configuration>
                <transformers>
                    <transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
                </transformers>
            </configuration>
        </execution>
    </executions>
</plugin>
--------------------------------------------------

Note that if you have a `main` class you want to automatically call when running `java -jar yourjar.jar`, just add
it to the `transformers`:

[source,xml]
--------------------------------------------------
<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
    <mainClass>org.elasticsearch.demo.Generate</mainClass>
</transformer>
--------------------------------------------------


== Deploying in JBoss EAP6 module

Elasticsearch and Lucene classes need to be in the same JBoss module.

@ -198,7 +198,7 @@ The following settings are supported:
request. Beyond this threshold, the S3 repository will use the
http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html[AWS Multipart Upload API]
to split the chunk into several parts, each of `buffer_size` length, and
to upload each part in its own request. Note that positioning a buffer
to upload each part in its own request. Note that setting a buffer
size lower than `5mb` is not allowed since it will prevent the use of the
Multipart API and may result in upload errors. Defaults to `5mb`.

@ -7,7 +7,29 @@ ground for all Elasticsearch-related code in Python; because of this it tries
to be opinion-free and very extendable. The full documentation is available at
http://elasticsearch-py.rtfd.org/

It can be installed with:
.Elasticsearch DSL
************************************************************************************
For a higher-level client library with a more limited scope, have a look at
http://elasticsearch-dsl.rtfd.org/[elasticsearch-dsl] - a more pythonic library
sitting on top of `elasticsearch-py`.

It provides a more convenient and idiomatic way to write and manipulate
http://elasticsearch-dsl.readthedocs.org/en/latest/search_dsl.html[queries]. It
stays close to the Elasticsearch JSON DSL, mirroring its terminology and
structure while exposing the whole range of the DSL from Python, either directly
using defined classes or queryset-like expressions.

It also provides an optional
http://elasticsearch-dsl.readthedocs.org/en/latest/persistence.html#doctype[persistence
layer] for working with documents as Python objects in an ORM-like fashion:
defining mappings, retrieving and saving documents, wrapping the document data
in user-defined classes.
************************************************************************************

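As a quick, illustrative sketch of the query style described in the sidebar above (the index and field names
`blog`, `title` and `published` are made up for this example, not taken from the documentation):

[source,python]
------------------------------------
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search

client = Elasticsearch()

# Build the query with chainable, Pythonic expressions instead of raw JSON.
s = Search(using=client, index="blog") \
    .query("match", title="python") \
    .filter("term", published=True)

response = s.execute()
for hit in response:
    print(hit.title)
------------------------------------
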
=== Installation

It can be installed with pip:

[source,sh]
------------------------------------

@ -16,13 +38,24 @@ pip install elasticsearch

=== Versioning

There are two branches for development - `master` and `0.4`. Master branch is
used to track all the changes for Elasticsearch 1.0 and beyond whereas 0.4
tracks Elasticsearch 0.90.
There are two branches for development - `master` and `1.x`. Master branch is
used to track all the changes for Elasticsearch 2.0 and beyond whereas 1.x
tracks Elasticsearch 1.*.

Releases with major version 1 (1.X.Y) are to be used with Elasticsearch 1.* and
later; 0.4 releases are meant to work with Elasticsearch 0.90.*.

The recommended way to set your requirements in your `setup.py` or
`requirements.txt` is:

------------------------------------
# Elasticsearch 2.x
elasticsearch>=2.0.0,<3.0.0

# Elasticsearch 1.x
elasticsearch>=1.0.0,<2.0.0
------------------------------------

=== Example use

Simple use-case:

@ -71,6 +104,10 @@ The client's features include:

* pluggable architecture

The client also contains a convenient set of
http://elasticsearch-py.readthedocs.org/en/master/helpers.html[helpers] for
some of the more engaging tasks like bulk indexing and reindexing.
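
A minimal sketch of the bulk helper, assuming a local node and an invented index name, type and document shape:

[source,python]
------------------------------------
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

client = Elasticsearch()

# Each action is a plain dict; the helper chunks them into _bulk requests.
actions = (
    {"_op_type": "index", "_index": "my-index", "_type": "doc", "_source": {"value": i}}
    for i in range(1000)
)

success, errors = bulk(client, actions)
print(success, errors)
------------------------------------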

=== License

@ -468,3 +468,7 @@ response is output by default.
Finally, the API for org.elasticsearch.monitor.os.OsStats has changed. The `getLoadAverage` method has been removed. The
value for this can now be obtained from `OsStats.Cpu#getLoadAverage`. Additionally, the recent CPU usage can be obtained
from `OsStats.Cpu#getPercent`.

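A minimal, hypothetical sketch of the new accessors; it assumes the `Cpu` sub-object is exposed via `getCpu()` and
that `osStats` comes from a nodes stats response, since only the method names quoted above are taken from this text:

[source,java]
--------------------------------------------------
// Hypothetical usage; `osStats` would come from e.g. a nodes stats response.
void printCpu(org.elasticsearch.monitor.os.OsStats osStats) {
    // was: osStats.getLoadAverage(), now on the Cpu sub-object
    System.out.println(osStats.getCpu().getLoadAverage());
    // recent CPU usage
    System.out.println(osStats.getCpu().getPercent());
}
--------------------------------------------------
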
=== Fields option
Only stored fields are retrievable with this option.
The fields option will no longer load non-stored fields from `_source`.

@ -121,10 +121,12 @@ curl -XPOST localhost:9200/_search -d '{
"functions": [
{
"script_score": {
"lang": "groovy",
"file": "calculate-score",
"params": {
"my_modifier": 8
"script": {
"lang": "groovy",
"file": "calculate-score",
"params": {
"my_modifier": 8
}
}
}
}

@ -180,10 +182,12 @@ curl -XPOST localhost:9200/_search -d '{
"functions": [
{
"script_score": {
"id": "indexedCalculateScore",
"lang" : "groovy",
"params": {
"my_modifier": 8
"script": {
"id": "indexedCalculateScore",
"lang" : "groovy",
"params": {
"my_modifier": 8
}
}
}
}

@ -29,9 +29,4 @@ subprojects {
// for local ES plugins, the name of the plugin is the same as the directory
name project.name
}

Task dependencyLicensesTask = DependencyLicensesTask.configure(project) {
dependencies = project.configurations.runtime - project.configurations.provided
}
project.precommit.dependsOn(dependencyLicensesTask)
}

@ -1 +0,0 @@
This plugin has no third party dependencies

@ -1 +0,0 @@
This plugin has no third party dependencies

@ -739,7 +739,7 @@ public class GeoDistanceTests extends ESIntegTestCase {
for (int i = 0; i < 10; ++i) {
final double originLat = randomLat();
final double originLon = randomLon();
final String distance = DistanceUnit.KILOMETERS.toString(randomInt(10000));
final String distance = DistanceUnit.KILOMETERS.toString(randomIntBetween(1, 10000));
for (GeoDistance geoDistance : Arrays.asList(GeoDistance.ARC, GeoDistance.SLOPPY_ARC)) {
logger.info("Now testing GeoDistance={}, distance={}, origin=({}, {})", geoDistance, distance, originLat, originLon);
GeoDistanceQueryBuilder qb = QueryBuilders.geoDistanceQuery("location").point(originLat, originLon).distance(distance).geoDistance(geoDistance);

@ -772,4 +772,4 @@ public class GeoDistanceTests extends ESIntegTestCase {
}
return matches;
}
}
}

@ -108,12 +108,12 @@ public class SearchFieldsTests extends ESIntegTestCase {
|
|||
assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
|
||||
|
||||
// field2 is not stored, check that it gets extracted from source
|
||||
// field2 is not stored, check that it is not extracted from source.
|
||||
searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field2").execute().actionGet();
|
||||
assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(1));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().get("field2").value().toString(), equalTo("value2"));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(0));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().get("field2"), nullValue());
|
||||
|
||||
searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field3").execute().actionGet();
|
||||
assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
|
||||
|
@ -121,6 +121,34 @@ public class SearchFieldsTests extends ESIntegTestCase {
|
|||
assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
|
||||
|
||||
searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*3").execute().actionGet();
|
||||
assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(1));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
|
||||
|
||||
|
||||
searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*3").addField("field1").addField("field2").execute().actionGet();
|
||||
assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(1));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
|
||||
|
||||
|
||||
searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("field*").execute().actionGet();
|
||||
assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(1));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(2));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().get("field1").value().toString(), equalTo("value1"));
|
||||
|
||||
searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("f*3").execute().actionGet();
|
||||
assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(1));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().size(), equalTo(1));
|
||||
assertThat(searchResponse.getHits().getAt(0).fields().get("field3").value().toString(), equalTo("value3"));
|
||||
|
||||
searchResponse = client().prepareSearch().setQuery(matchAllQuery()).addField("*").execute().actionGet();
|
||||
assertThat(searchResponse.getHits().getTotalHits(), equalTo(1l));
|
||||
assertThat(searchResponse.getHits().hits().length, equalTo(1));
|
||||
|
@ -439,8 +467,7 @@ public class SearchFieldsTests extends ESIntegTestCase {
|
|||
.get();
|
||||
|
||||
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
|
||||
assertThat(searchResponse.getHits().getAt(0).field("field1").isMetadataField(), equalTo(false));
|
||||
assertThat(searchResponse.getHits().getAt(0).field("field1").getValue().toString(), equalTo("value"));
|
||||
assertThat(searchResponse.getHits().getAt(0).field("field1"), nullValue());
|
||||
assertThat(searchResponse.getHits().getAt(0).field("_routing").isMetadataField(), equalTo(true));
|
||||
assertThat(searchResponse.getHits().getAt(0).field("_routing").getValue().toString(), equalTo("1"));
|
||||
}
|
||||
|
@ -647,8 +674,7 @@ public class SearchFieldsTests extends ESIntegTestCase {
|
|||
|
||||
Map<String, SearchHitField> fields = response.getHits().getAt(0).getFields();
|
||||
|
||||
assertThat(fields.get("field1").isMetadataField(), equalTo(false));
|
||||
assertThat(fields.get("field1").getValue().toString(), equalTo("value"));
|
||||
assertThat(fields.get("field1"), nullValue());
|
||||
assertThat(fields.get("_routing").isMetadataField(), equalTo(true));
|
||||
assertThat(fields.get("_routing").getValue().toString(), equalTo("1"));
|
||||
assertThat(fields.get("_timestamp").isMetadataField(), equalTo(true));
|
||||
|
|
|
@ -624,7 +624,7 @@ public class AttachmentMapper extends FieldMapper {

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name());
builder.startObject(simpleName());
builder.field("type", CONTENT_TYPE);
if (indexCreatedBefore2x) {
builder.field("path", pathType.name().toLowerCase(Locale.ROOT));

@ -22,10 +22,14 @@ package org.elasticsearch.mapper.attachments;
|
|||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.DocumentMapperParser;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
|
||||
|
@ -107,4 +111,32 @@ public class SimpleAttachmentMapperTests extends AttachmentUnitTestCase {
|
|||
assertThat(doc.get(docMapper.mappers().getMapper("file.content").fieldType().names().indexName()), containsString("This document tests the ability of Apache Tika to extract content"));
|
||||
}
|
||||
|
||||
/**
|
||||
* See issue https://github.com/elastic/elasticsearch-mapper-attachments/issues/169
|
||||
* Mapping should not contain field names with dot.
|
||||
*/
|
||||
public void testMapperErrorWithDotTwoLevels169() throws Exception {
|
||||
XContentBuilder mappingBuilder = jsonBuilder();
|
||||
mappingBuilder.startObject()
|
||||
.startObject("mail")
|
||||
.startObject("properties")
|
||||
.startObject("attachments")
|
||||
.startObject("properties")
|
||||
.startObject("innerfield")
|
||||
.field("type", "attachment")
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject();
|
||||
|
||||
byte[] mapping = mappingBuilder.bytes().toBytes();
|
||||
MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), Settings.EMPTY);
|
||||
DocumentMapper docMapper = mapperService.parse("mail", new CompressedXContent(mapping), true);
|
||||
// this should not throw an exception
|
||||
mapperService.parse("mail", new CompressedXContent(docMapper.mapping().toString()), true);
|
||||
// the mapping may not contain a field name with a dot
|
||||
assertFalse(docMapper.mapping().toString().contains("."));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -1 +0,0 @@
This plugin has no third party dependencies

@ -1 +0,0 @@
This plugin has no third party dependencies

@ -1 +0,0 @@
This plugin has no third party dependencies

@ -1 +0,0 @@
This plugin has no third party dependencies

@ -79,7 +79,6 @@
body:
fields: [ include.field2 ]
query: { match_all: {} }
- match: { hits.hits.0.fields: { include.field2 : [v2] }}
- is_false: hits.hits.0._source

- do:

@ -87,7 +86,7 @@
body:
fields: [ include.field2, _source ]
query: { match_all: {} }
- match: { hits.hits.0.fields: { include.field2 : [v2] }}
- match: { hits.hits.0._source.include.field2: v2 }
- is_true: hits.hits.0._source


@ -95,4 +94,3 @@
search:
fielddata_fields: [ "count" ]
- match: { hits.hits.0.fields.count: [1] }

@ -25,7 +25,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.service.PendingClusterTask;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleListener;
import org.elasticsearch.common.transport.DummyTransportAddress;

@ -115,12 +114,12 @@ public class NoopClusterService implements ClusterService {
}

@Override
public void submitStateUpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) {

}

@Override
public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) {
public <T> void submitStateUpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {

}

@ -28,7 +28,6 @@ import org.elasticsearch.cluster.routing.OperationRouting;
|
|||
import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
|
||||
import org.elasticsearch.cluster.service.PendingClusterTask;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.component.Lifecycle;
|
||||
import org.elasticsearch.common.component.LifecycleListener;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
|
@ -40,10 +39,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
|||
import org.elasticsearch.common.util.concurrent.FutureUtils;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Queue;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
|
||||
|
@ -183,31 +179,35 @@ public class TestClusterService implements ClusterService {
|
|||
}
|
||||
|
||||
@Override
|
||||
synchronized public void submitStateUpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) {
|
||||
logger.debug("processing [{}]", source);
|
||||
if (state().nodes().localNodeMaster() == false && updateTask.runOnlyOnMaster()) {
|
||||
updateTask.onNoLongerMaster(source);
|
||||
logger.debug("failed [{}], no longer master", source);
|
||||
return;
|
||||
}
|
||||
ClusterState newState;
|
||||
ClusterState previousClusterState = state;
|
||||
try {
|
||||
newState = updateTask.execute(previousClusterState);
|
||||
} catch (Exception e) {
|
||||
updateTask.onFailure(source, new ElasticsearchException("failed to process cluster state update task [" + source + "]", e));
|
||||
return;
|
||||
}
|
||||
setStateAndNotifyListeners(newState);
|
||||
if (updateTask instanceof ClusterStateUpdateTask) {
|
||||
((ClusterStateUpdateTask) updateTask).clusterStateProcessed(source, previousClusterState, newState);
|
||||
}
|
||||
logger.debug("finished [{}]", source);
|
||||
public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) {
|
||||
submitStateUpdateTask(source, null, updateTask, updateTask, updateTask);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) {
|
||||
submitStateUpdateTask(source, Priority.NORMAL, updateTask);
|
||||
synchronized public <T> void submitStateUpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {
|
||||
logger.debug("processing [{}]", source);
|
||||
if (state().nodes().localNodeMaster() == false && executor.runOnlyOnMaster()) {
|
||||
listener.onNoLongerMaster(source);
|
||||
logger.debug("failed [{}], no longer master", source);
|
||||
return;
|
||||
}
|
||||
ClusterStateTaskExecutor.BatchResult<T> batchResult;
|
||||
ClusterState previousClusterState = state;
|
||||
try {
|
||||
batchResult = executor.execute(previousClusterState, Arrays.asList(task));
|
||||
} catch (Exception e) {
|
||||
batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failure(task, e).build(previousClusterState);
|
||||
}
|
||||
|
||||
batchResult.executionResults.get(task).handle(
|
||||
() -> {},
|
||||
ex -> listener.onFailure(source, new ElasticsearchException("failed to process cluster state update task [" + source + "]", ex))
|
||||
);
|
||||
|
||||
setStateAndNotifyListeners(batchResult.resultingState);
|
||||
listener.clusterStateProcessed(source, previousClusterState, batchResult.resultingState);
|
||||
logger.debug("finished [{}]", source);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -58,7 +58,7 @@ public class BlockClusterStateProcessing extends SingleNodeDisruption {
boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1));
assert success : "startDisrupting called without waiting on stopDistrupting to complete";
final CountDownLatch started = new CountDownLatch(1);
clusterService.submitStateUpdateTask("service_disruption_block", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask(Priority.IMMEDIATE) {

@Override
public boolean runOnlyOnMaster() {

@ -102,7 +102,7 @@ public class SlowClusterStateProcessing extends SingleNodeDisruption {
return false;
}
final AtomicBoolean stopped = new AtomicBoolean(false);
clusterService.submitStateUpdateTask("service_disruption_delay", Priority.IMMEDIATE, new ClusterStateUpdateTask() {
clusterService.submitStateUpdateTask("service_disruption_delay", new ClusterStateUpdateTask(Priority.IMMEDIATE) {

@Override
public boolean runOnlyOnMaster() {