Merge remote-tracking branch 'upstream/master' into feature/ingest

Conflicts:
	plugins/pom.xml
	qa/smoke-test-plugins/pom.xml
Tal Levy 2015-11-02 10:03:10 -08:00
commit 4f5f2ff5e9
545 changed files with 2783 additions and 9763 deletions

.dir-locals.el (new file, 85 lines)

@@ -0,0 +1,85 @@
((java-mode
.
((eval
.
(progn
(defun my/point-in-defun-declaration-p ()
(let ((bod (save-excursion (c-beginning-of-defun)
(point))))
(<= bod
(point)
(save-excursion (goto-char bod)
(re-search-forward "{")
(point)))))
(defun my/is-string-concatenation-p ()
"Returns true if the previous line is a string concatenation"
(save-excursion
(let ((start (point)))
(forward-line -1)
(if (re-search-forward " \\\+$" start t) t nil))))
(defun my/inside-java-lambda-p ()
"Returns true if point is the first statement inside of a lambda"
(save-excursion
(c-beginning-of-statement-1)
(let ((start (point)))
(forward-line -1)
(if (search-forward " -> {" start t) t nil))))
(defun my/trailing-paren-p ()
"Returns true if point is a training paren and semicolon"
(save-excursion
(end-of-line)
(let ((endpoint (point)))
(beginning-of-line)
(if (re-search-forward "[ ]*);$" endpoint t) t nil))))
(defun my/prev-line-call-with-no-args-p ()
"Return true if the previous line is a function call with no arguments"
(save-excursion
(let ((start (point)))
(forward-line -1)
(if (re-search-forward ".($" start t) t nil))))
(defun my/arglist-cont-nonempty-indentation (arg)
(if (my/inside-java-lambda-p)
'+
(if (my/is-string-concatenation-p)
16
(unless (my/point-in-defun-declaration-p) '++))))
(defun my/statement-block-intro (arg)
(if (and (c-at-statement-start-p) (my/inside-java-lambda-p)) 0 '+))
(defun my/block-close (arg)
(if (my/inside-java-lambda-p) '- 0))
(defun my/arglist-close (arg) (if (my/trailing-paren-p) 0 '--))
(defun my/arglist-intro (arg)
(if (my/prev-line-call-with-no-args-p) '++ 0))
(c-set-offset 'inline-open 0)
(c-set-offset 'topmost-intro-cont '+)
(c-set-offset 'statement-block-intro 'my/statement-block-intro)
(c-set-offset 'block-close 'my/block-close)
(c-set-offset 'knr-argdecl-intro '+)
(c-set-offset 'substatement-open '+)
(c-set-offset 'substatement-label '+)
(c-set-offset 'case-label '+)
(c-set-offset 'label '+)
(c-set-offset 'statement-case-open '+)
(c-set-offset 'statement-cont '++)
(c-set-offset 'arglist-intro 'my/arglist-intro)
(c-set-offset 'arglist-cont-nonempty '(my/arglist-cont-nonempty-indentation c-lineup-arglist))
(c-set-offset 'arglist-close 'my/arglist-close)
(c-set-offset 'inexpr-class 0)
(c-set-offset 'access-label 0)
(c-set-offset 'inher-intro '++)
(c-set-offset 'inher-cont '++)
(c-set-offset 'brace-list-intro '+)
(c-set-offset 'func-decl-cont '++)
))
(c-basic-offset . 4)
(c-comment-only-line-offset . (0 . 0)))))
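One usage note on the file above: Emacs treats `eval` entries in .dir-locals.el as risky local variables, so it prompts before applying them the first time a Java buffer is visited (answering `!` records them as permanently safe).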

.projectile (new file, 32 lines)

@@ -0,0 +1,32 @@
-/target
-/core/target
-/qa/target
-/rest-api-spec/target
-/test-framework/target
-/plugins/target
-/plugins/analysis-icu/target
-/plugins/analysis-kuromoji/target
-/plugins/analysis-phonetic/target
-/plugins/analysis-smartcn/target
-/plugins/analysis-stempel/target
-/plugins/cloud-aws/target
-/plugins/cloud-azure/target
-/plugins/cloud-gce/target
-/plugins/delete-by-query/target
-/plugins/discovery-azure/target
-/plugins/discovery-ec2/target
-/plugins/discovery-gce/target
-/plugins/discovery-multicast/target
-/plugins/jvm-example/target
-/plugins/lang-expression/target
-/plugins/lang-groovy/target
-/plugins/lang-javascript/target
-/plugins/lang-python/target
-/plugins/mapper-murmur3/target
-/plugins/mapper-size/target
-/plugins/repository-azure/target
-/plugins/repository-s3/target
-/plugins/site-example/target
-/plugins/store-smb/target
-/plugins/target
-*.class

@@ -1,18 +0,0 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.annotation.inheritNullAnnotations=enabled
org.eclipse.jdt.core.compiler.annotation.missingNonNullByDefaultAnnotation=ignore
org.eclipse.jdt.core.compiler.annotation.nullable=org.elasticsearch.common.Nullable
org.eclipse.jdt.core.compiler.annotation.nullanalysis=enabled
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.7
org.eclipse.jdt.core.compiler.compliance=1.7
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.compiler.problem.nonnullParameterAnnotationDropped=warning
org.eclipse.jdt.core.compiler.problem.nullAnnotationInferenceConflict=warning
org.eclipse.jdt.core.compiler.problem.nullReference=warning
org.eclipse.jdt.core.compiler.problem.nullSpecViolation=warning
org.eclipse.jdt.core.compiler.problem.nullUncheckedConversion=warning
org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning
org.eclipse.jdt.core.compiler.source=1.7
org.eclipse.jdt.core.formatter.lineSplit=140
org.eclipse.jdt.core.formatter.tabulation.char=space
org.eclipse.jdt.core.formatter.tabulation.size=4

@@ -76,7 +76,7 @@ Contributing to the Elasticsearch codebase
**Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)
Make sure you have [Gradle](http://gradle.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE by running `gradle eclipse` and then importing the project into their workspace: `File > Import > Existing project into workspace` and make sure to select `Search for nested projects...` option as Elasticsearch is a multi-module maven project. Additionally you will want to ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini` accordingly to avoid GC overhead errors.
Make sure you have [Gradle](http://gradle.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE: `gradle eclipse` then `File: Import: Existing Projects into Workspace`. Select the option `Search for nested projects`. Additionally you will want to ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini` accordingly to avoid GC overhead errors.
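In command form, the Eclipse workflow described in the new paragraph above is roughly (the heap flag shown is one common way to reach 2048m, not quoted from the docs):

    gradle eclipse
    # then File > Import > Existing Projects into Workspace,
    # ticking "Search for nested projects"
    # and in eclipse.ini, e.g.: -Xmx2048m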
Please follow these formatting guidelines:

GRADLE.CHEATSHEET (new file, 7 lines)

@@ -0,0 +1,7 @@
As a quick helper, below are the equivalent commands from maven to gradle (TESTING.md has also been updated). You can also run "gradle tasks" to see all tasks that are available to run.
clean -> clean
test -> test
verify -> check
verify -Dskip.unit.tests -> integTest
package -DskipTests -> assemble
install -DskipTests -> install
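As with maven phases, gradle tasks chain in a single invocation, so a typical migrated command line looks like this (a sketch built only from the rows above):

    mvn clean verify           =>  gradle clean check
    mvn install -DskipTests    =>  gradle clean assemble install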

@@ -18,6 +18,7 @@
*/
import com.bmuschko.gradle.nexus.NexusPlugin
import org.gradle.plugins.ide.eclipse.model.SourceFolder
buildscript {
repositories {
@@ -134,7 +135,12 @@ subprojects {
dependencySubstitution {
substitute module("org.elasticsearch:rest-api-spec:${version}") with project("${projectsPrefix}:rest-api-spec")
substitute module("org.elasticsearch:elasticsearch:${version}") with project("${projectsPrefix}:core")
substitute module("org.elasticsearch:test-framework:${version}") with project("${projectsPrefix}:test-framework")
// so that eclipse doesn't have circular references
// the downside is, if you hack on test-framework, you have to gradle install
// the first prop detects eclipse itself, the second detects eclipse from commandline
if (System.getProperty("eclipse.launcher") == null && gradle.startParameter.taskNames.contains('eclipse') == false) {
substitute module("org.elasticsearch:test-framework:${version}") with project("${projectsPrefix}:test-framework")
}
substitute module("org.elasticsearch.distribution.zip:elasticsearch:${version}") with project("${projectsPrefix}:distribution:zip")
}
}
@@ -142,22 +148,13 @@ subprojects {
}
}
// IDE configuration
// intellij configuration
allprojects {
apply plugin: 'idea'
apply plugin: 'eclipse'
// TODO: similar for intellij
eclipse {
classpath {
defaultOutputDir = new File(project.buildDir, 'eclipse')
}
}
}
idea {
if (project != null) {
// could be null, if this project is attached to another...
if (hasProperty('projectsPrefix') == false) {
idea {
project {
languageLevel = sourceCompatibility
vcs = 'Git'
@@ -165,3 +162,35 @@
}
}
// eclipse configuration
allprojects {
apply plugin: 'eclipse'
plugins.withType(JavaBasePlugin) {
eclipse.classpath.defaultOutputDir = new File(project.buildDir, 'eclipse')
eclipse.classpath.file.whenMerged { classpath ->
// give each source folder a unique corresponding output folder
int i = 0;
classpath.entries.findAll { it instanceof SourceFolder }.each { folder ->
i++;
// this is *NOT* a path or a file.
folder.output = "build/eclipse/" + i
}
}
}
task cleanEclipseSettings(type: Delete) {
delete '.settings'
}
task copyEclipseSettings(type: Copy) {
// TODO: "package this up" for external builds
from new File(project.rootDir, 'buildSrc/src/main/resources/eclipse.settings')
into '.settings'
}
// otherwise .settings is not nuked entirely
tasks.cleanEclipse.dependsOn(cleanEclipseSettings)
// otherwise the eclipse merging is *super confusing*
tasks.eclipse.dependsOn(cleanEclipse)
tasks.eclipse.dependsOn(copyEclipseSettings)
}

@@ -46,11 +46,11 @@ props.load(project.file('../gradle.properties').newDataInputStream())
version = props.getProperty('version')
processResources {
inputs.file('../gradle.properties')
filter ReplaceTokens, tokens: [
'version': props.getProperty('version'),
'luceneVersion': props.getProperty('luceneVersion')
]
inputs.file('../gradle.properties')
filter ReplaceTokens, tokens: [
'version': props.getProperty('version'),
'luceneVersion': props.getProperty('luceneVersion')
]
}
extraArchive {
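On the processResources block above: ReplaceTokens is the Ant filter that substitutes @-delimited markers in copied resources, and declaring gradle.properties as a task input keeps the replacement from going stale. A sketch of the effect, using the filtered resource named elsewhere in this commit:

    // a line in es-build.properties before filtering:
    //     version=@version@
    // and after processResources runs:
    //     version=3.0.0-SNAPSHOT   (value read from gradle.properties)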

@@ -1,4 +1,4 @@
package com.carrotsearch.gradle.randomizedtesting
package com.carrotsearch.gradle.junit4
import com.carrotsearch.ant.tasks.junit4.SuiteBalancer
import com.carrotsearch.ant.tasks.junit4.balancers.ExecutionTimeBalancer

@@ -1,4 +1,4 @@
package com.carrotsearch.gradle.randomizedtesting
package com.carrotsearch.gradle.junit4
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
import com.carrotsearch.ant.tasks.junit4.listeners.antxml.AntXmlReport

@@ -1,4 +1,4 @@
package com.carrotsearch.gradle.randomizedtesting
package com.carrotsearch.gradle.junit4
import org.gradle.api.logging.LogLevel
import org.gradle.api.logging.Logger

@@ -1,4 +1,4 @@
package com.carrotsearch.gradle.randomizedtesting
package com.carrotsearch.gradle.junit4
import com.carrotsearch.ant.tasks.junit4.JUnit4
import org.gradle.api.AntBuilder

@@ -1,4 +1,4 @@
package com.carrotsearch.gradle.randomizedtesting
package com.carrotsearch.gradle.junit4
import com.carrotsearch.ant.tasks.junit4.ListenersList
import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener
@@ -20,8 +20,6 @@ import javax.inject.Inject
class RandomizedTestingTask extends DefaultTask {
PatternFilterable patternSet = new PatternSet()
// TODO: change to "executable" to match gradle test params?
@Optional
@Input
@@ -64,6 +62,7 @@ class RandomizedTestingTask extends DefaultTask {
List<String> jvmArgs = new ArrayList<>()
Map<String, String> systemProperties = new HashMap<>()
PatternFilterable patternSet = new PatternSet()
RandomizedTestingTask() {
outputs.upToDateWhen {false} // randomized tests are never up to date
@@ -166,7 +165,6 @@
}
// TODO: add leaveTemporary
// TODO: add jvmOutputAction?
// TODO: add ifNoTests!
@TaskAction

@@ -1,4 +1,4 @@
package com.carrotsearch.gradle.randomizedtesting
package com.carrotsearch.gradle.junit4
class SlowTestsConfiguration {
int heartbeat = 0

@@ -1,4 +1,4 @@
package com.carrotsearch.gradle.randomizedtesting
package com.carrotsearch.gradle.junit4
class StackTraceFiltersConfiguration {
List<String> patterns = new ArrayList<>()

@@ -0,0 +1,34 @@
package com.carrotsearch.gradle.junit4
import org.gradle.util.ConfigureUtil
class TestLoggingConfiguration {
/** Display mode for output streams. */
static enum OutputMode {
/** Always display the output emitted from tests. */
ALWAYS,
/**
* Display the output only if a test/suite failed. This requires internal buffering
* so the output will be shown only after a test completes.
*/
ONERROR,
/** Don't display the output, even on test failures. */
NEVER
}
OutputMode outputMode = OutputMode.ONERROR
SlowTestsConfiguration slowTests = new SlowTestsConfiguration()
StackTraceFiltersConfiguration stackTraceFilters = new StackTraceFiltersConfiguration()
void slowTests(Closure closure) {
ConfigureUtil.configure(closure, slowTests)
}
void stackTraceFilters(Closure closure) {
ConfigureUtil.configure(closure, stackTraceFilters)
}
void outputMode(String mode) {
outputMode = mode.toUpperCase() as OutputMode
}
}
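A sketch of how a build script can drive this DSL; the name of the enclosing configuration block is an assumption, while `heartbeat`, `regex(...)`, and `outputMode` come from SlowTestsConfiguration above and the BuildPlugin change later in this commit:

    // illustrative build.gradle fragment, not verbatim from this commit
    testLogging {
        outputMode 'onerror'   // parsed case-insensitively into OutputMode.ONERROR
        slowTests {
            heartbeat = 10     // public int field on SlowTestsConfiguration
        }
        stackTraceFilters {
            // same pattern style BuildPlugin registers to hide noisy frames
            regex(/^(\s+at )(org\.apache\.lucene)/)
        }
    }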

@@ -17,7 +17,7 @@
* under the License.
*/
package com.carrotsearch.gradle.randomizedtesting
package com.carrotsearch.gradle.junit4
import com.carrotsearch.ant.tasks.junit4.JUnit4
import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.eventbus.Subscribe

@@ -1,4 +1,4 @@
package com.carrotsearch.gradle.randomizedtesting
package com.carrotsearch.gradle.junit4
import com.carrotsearch.ant.tasks.junit4.JUnit4
import com.carrotsearch.ant.tasks.junit4.Pluralize
@@ -18,6 +18,7 @@ import org.junit.runner.Description
import java.util.concurrent.atomic.AtomicInteger
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.*
import static com.carrotsearch.gradle.junit4.TestLoggingConfiguration.OutputMode
class TestReportLogger extends TestsSummaryEventListener implements AggregatedEventListener {
@@ -54,20 +55,6 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
LoggingOutputStream outStream
LoggingOutputStream errStream
/** Display mode for output streams. */
static enum OutputMode {
/** Always display the output emitted from tests. */
ALWAYS,
/**
* Display the output only if a test/suite failed. This requires internal buffering
* so the output will be shown only after a test completes.
*/
ONERROR,
/** Don't display the output, even on test failures. */
NEVER
}
OutputMode outputMode = OutputMode.ONERROR
/** A list of failed tests, if to be displayed at the end. */
List<Description> failedTests = new ArrayList<>()
@@ -238,7 +225,7 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
}
void emitBufferedEvents(LogLevel level, AggregatedSuiteResultEvent e) throws IOException {
if (outputMode == OutputMode.NEVER) {
if (config.outputMode == OutputMode.NEVER) {
return
}
@@ -247,8 +234,8 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
eventMap.put(tre.getTestFinishedEvent(), tre)
}
final boolean emitOutput = outputMode == OutputMode.ALWAYS && isPassthrough() == false ||
outputMode == OutputMode.ONERROR && e.isSuccessful() == false
final boolean emitOutput = config.outputMode == OutputMode.ALWAYS && isPassthrough() == false ||
config.outputMode == OutputMode.ONERROR && e.isSuccessful() == false
for (IEvent event : e.getEventStream()) {
switch (event.getType()) {
@@ -363,7 +350,7 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
/** Returns true if output should be logged immediately. Only relevant when running with INFO log level. */
boolean isPassthrough() {
return forkedJvmCount == 1 && outputMode == OutputMode.ALWAYS && logger.isInfoEnabled()
return forkedJvmCount == 1 && config.outputMode == OutputMode.ALWAYS && logger.isInfoEnabled()
}
@Override

@@ -1,16 +0,0 @@
package com.carrotsearch.gradle.randomizedtesting
import org.gradle.util.ConfigureUtil
class TestLoggingConfiguration {
SlowTestsConfiguration slowTests = new SlowTestsConfiguration()
StackTraceFiltersConfiguration stackTraceFilters = new StackTraceFiltersConfiguration()
void slowTests(Closure closure) {
ConfigureUtil.configure(closure, slowTests)
}
void stackTraceFilters(Closure closure) {
ConfigureUtil.configure(closure, stackTraceFilters)
}
}

@@ -19,12 +19,14 @@
package org.elasticsearch.gradle
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.GradleException
import org.gradle.api.JavaVersion
import org.gradle.api.Plugin
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.JavaCompile
import org.gradle.util.VersionNumber
/**
* Encapsulates build configuration for elasticsearch projects.
@@ -33,8 +35,9 @@ class BuildPlugin implements Plugin<Project> {
@Override
void apply(Project project) {
globalBuildInfo(project)
project.pluginManager.apply('java')
project.pluginManager.apply('carrotsearch.randomizedtesting')
project.pluginManager.apply('carrotsearch.randomized-testing')
// these plugins add lots of info to our jars
project.pluginManager.apply('nebula.info-broker')
project.pluginManager.apply('nebula.info-basic')
@@ -48,6 +51,25 @@ class BuildPlugin implements Plugin<Project> {
PrecommitTasks.configure(project)
}
static void globalBuildInfo(Project project) {
if (project.rootProject.ext.has('buildChecksDone') == false) {
// enforce gradle version
VersionNumber gradleVersion = VersionNumber.parse(project.gradle.gradleVersion)
if (gradleVersion.major < 2 || gradleVersion.major == 2 && gradleVersion.minor < 6) {
throw new GradleException('Gradle 2.6 or above is required to build elasticsearch')
}
// Build debugging info
println '======================================='
println 'Elasticsearch Build Hamster says Hello!'
println '======================================='
println " Gradle Version : ${project.gradle.gradleVersion}"
println " JDK Version : ${System.getProperty('java.runtime.version')} (${System.getProperty('java.vendor')})"
println " OS Info : ${System.getProperty('os.name')} ${System.getProperty('os.version')} (${System.getProperty('os.arch')})"
project.rootProject.ext.buildChecksDone = true
}
}
/** Adds compiler settings to the project */
static void configureCompile(Project project) {
project.afterEvaluate {
@@ -129,6 +151,7 @@ class BuildPlugin implements Plugin<Project> {
regex(/^(\s+at )(org\.apache\.lucene\.util\.TestRule)/)
regex(/^(\s+at )(org\.apache\.lucene\.util\.AbstractBeforeAfterRule)/)
}
outputMode System.getProperty('tests.output', 'onerror')
}
balancers {

@@ -18,7 +18,7 @@
*/
package org.elasticsearch.gradle.test
import com.carrotsearch.gradle.randomizedtesting.RandomizedTestingTask
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.gradle.api.Project
import org.gradle.api.Task

@@ -18,7 +18,7 @@
*/
package org.elasticsearch.gradle.test
import com.carrotsearch.gradle.randomizedtesting.RandomizedTestingTask
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.ElasticsearchProperties
import org.gradle.api.Plugin
import org.gradle.api.Project
@@ -29,8 +29,7 @@ class RestTestPlugin implements Plugin<Project> {
@Override
void apply(Project project) {
project.pluginManager.apply('java-base')
project.pluginManager.apply('carrotsearch.randomizedtesting')
project.pluginManager.apply('idea')
project.pluginManager.apply('carrotsearch.randomized-testing')
// remove some unnecessary tasks for a qa test
project.tasks.removeAll { it.name in ['assemble', 'buildDependents'] }
@@ -53,6 +52,7 @@ class RestTestPlugin implements Plugin<Project> {
project.eclipse {
classpath {
sourceSets = [project.sourceSets.test]
plusConfigurations = [project.configurations.testRuntime]
}
}
}

@@ -0,0 +1 @@
implementation-class=com.carrotsearch.gradle.junit4.RandomizedTestingPlugin
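For context on the rename: Gradle derives a plugin id from the name of its descriptor under META-INF/gradle-plugins, so this new properties file (presumably carrotsearch.randomized-testing.properties, replacing the old carrotsearch.randomizedtesting descriptor deleted below) is what enables the apply calls seen in BuildPlugin and RestTestPlugin:

    // build script usage enabled by this descriptor
    apply plugin: 'carrotsearch.randomized-testing'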

@@ -1 +0,0 @@
implementation-class=com.carrotsearch.gradle.randomizedtesting.RandomizedTestingPlugin

@@ -1,6 +1,6 @@
eclipse.preferences.version=1
encoding//src/main/java=UTF-8
encoding//src/main/resources=UTF-8
encoding//src/test/java=UTF-8
encoding//src/test/resources=UTF-8
encoding/<project>=UTF-8
encoding/rest-api-spec=UTF-8
encoding/<project>=UTF-8

@@ -0,0 +1,22 @@
eclipse.preferences.version=1
# previous configuration from maven build
# this is merged with gradle's generated properties during 'gradle eclipse'
# NOTE: null pointer analysis etc is not enabled currently, it seems very unstable
# (e.g. crashing eclipse etc)
# org.eclipse.jdt.core.compiler.annotation.inheritNullAnnotations=enabled
# org.eclipse.jdt.core.compiler.annotation.missingNonNullByDefaultAnnotation=ignore
# org.eclipse.jdt.core.compiler.annotation.nullable=org.elasticsearch.common.Nullable
# org.eclipse.jdt.core.compiler.annotation.nullanalysis=enabled
# org.eclipse.jdt.core.compiler.problem.nonnullParameterAnnotationDropped=warning
# org.eclipse.jdt.core.compiler.problem.nullAnnotationInferenceConflict=warning
# org.eclipse.jdt.core.compiler.problem.nullReference=warning
# org.eclipse.jdt.core.compiler.problem.nullSpecViolation=warning
# org.eclipse.jdt.core.compiler.problem.nullUncheckedConversion=warning
# org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.formatter.lineSplit=140
org.eclipse.jdt.core.formatter.tabulation.char=space
org.eclipse.jdt.core.formatter.tabulation.size=4

@@ -17,7 +17,7 @@
* under the License.
*/
import com.carrotsearch.gradle.randomizedtesting.RandomizedTestingTask
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestSpecHack
@@ -89,20 +89,10 @@ dependencies {
compile 'net.java.dev.jna:jna:4.1.0', optional
// TODO: remove these test deps and just depend on test-framework
testCompile(group: 'junit', name: 'junit', version: '4.11') {
transitive = false
testCompile("org.elasticsearch:test-framework:${version}") {
// tests use the locally compiled version of core
exclude group: 'org.elasticsearch', module: 'elasticsearch'
}
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testCompile("org.apache.lucene:lucene-test-framework:${versions.lucene}") {
exclude group: 'com.carrotsearch.randomizedtesting', module: 'junit4-ant'
}
testCompile(group: 'org.hamcrest', name: 'hamcrest-all', version: '1.3') {
exclude group: 'org.hamcrest', module: 'hamcrest-core'
}
testCompile 'com.google.jimfs:jimfs:1.0'
testCompile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
}
compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked"
@@ -129,5 +119,4 @@ integTest.mustRunAfter test
RestSpecHack.configureDependencies(project)
Task copyRestSpec = RestSpecHack.configureTask(project, true)
integTest.dependsOn copyRestSpec
test.dependsOn copyRestSpec

@@ -1,368 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.elasticsearch</groupId>
<artifactId>parent</artifactId>
<version>3.0.0-SNAPSHOT</version>
</parent>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch</artifactId>
<name>Elasticsearch: Core</name>
<description>Elasticsearch - Open Source, Distributed, RESTful Search Engine</description>
<properties>
<xlint.options>-Xlint:-cast,-deprecation,-fallthrough,-overrides,-rawtypes,-serial,-try,-unchecked</xlint.options>
</properties>
<dependencies>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-all</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.carrotsearch.randomizedtesting</groupId>
<artifactId>randomizedtesting-runner</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-test-framework</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.jimfs</groupId>
<artifactId>jimfs</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-core</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-backward-codecs</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-analyzers-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-queries</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-memory</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-highlighter</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-queryparser</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-suggest</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-join</artifactId>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-spatial</artifactId>
</dependency>
<dependency>
<groupId>com.spatial4j</groupId>
<artifactId>spatial4j</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>com.vividsolutions</groupId>
<artifactId>jts</artifactId>
<optional>true</optional>
</dependency>
<!-- needed for templating -->
<dependency>
<groupId>com.github.spullara.mustache.java</groupId>
<artifactId>compiler</artifactId>
<optional>true</optional>
</dependency>
<!-- Lucene spatial -->
<dependency>
<groupId>org.elasticsearch</groupId>
<artifactId>securesm</artifactId>
</dependency>
<dependency>
<groupId>com.carrotsearch</groupId>
<artifactId>hppc</artifactId>
</dependency>
<dependency>
<groupId>joda-time</groupId>
<artifactId>joda-time</artifactId>
</dependency>
<dependency>
<groupId>org.joda</groupId>
<artifactId>joda-convert</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-smile</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-yaml</artifactId>
<exclusions>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.dataformat</groupId>
<artifactId>jackson-dataformat-cbor</artifactId>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</dependency>
<dependency>
<groupId>com.ning</groupId>
<artifactId>compress-lzf</artifactId>
</dependency>
<dependency>
<groupId>com.tdunning</groupId>
<artifactId>t-digest</artifactId>
</dependency>
<dependency>
<groupId>org.hdrhistogram</groupId>
<artifactId>HdrHistogram</artifactId>
</dependency>
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>apache-log4j-extras</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>net.java.dev.jna</groupId>
<artifactId>jna</artifactId>
<optional>true</optional>
</dependency>
</dependencies>
<build>
<resources>
<resource>
<directory>${project.basedir}/src/main/resources</directory>
<includes>
<include>es-build.properties</include>
</includes>
<filtering>true</filtering>
</resource>
<resource>
<directory>${project.basedir}/src/main/resources</directory>
<includes>
<include>**/*.*</include>
</includes>
</resource>
</resources>
<testResources>
<testResource>
<directory>${project.basedir}/src/test/resources</directory>
<includes>
<include>**/*.*</include>
</includes>
</testResource>
<testResource>
<directory>${elasticsearch.tools.directory}/rest-api-spec</directory>
<targetPath>rest-api-spec</targetPath>
<includes>
<include>api/*.json</include>
<include>test/**/*.yaml</include>
</includes>
</testResource>
<!-- shared test resources like log4j.properties -->
<testResource>
<directory>${elasticsearch.tools.directory}/shared-test-resources</directory>
<filtering>false</filtering>
</testResource>
</testResources>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-test-sources</id>
<goals>
<goal>test-jar</goal>
</goals>
<configuration>
<includes>
<include>org/elasticsearch/test/**/*</include>
<include>org/elasticsearch/bootstrap/BootstrapForTesting.class</include>
<include>org/elasticsearch/bootstrap/BootstrapForTesting$*.class</include>
<include>org/elasticsearch/common/cli/CliToolTestCase.class</include>
<include>org/elasticsearch/common/cli/CliToolTestCase$*.class</include>
</includes>
<excludes>
<!-- unit tests for yaml suite parser & rest spec parser need to be excluded -->
<exclude>org/elasticsearch/test/rest/test/**/*</exclude>
<!-- unit tests for test framework classes-->
<exclude>org/elasticsearch/test/test/**/*</exclude>
</excludes>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<phase>prepare-package</phase>
<goals>
<goal>test-jar</goal>
</goals>
<configuration>
<includes>
<include>rest-api-spec/**/*</include>
<include>org/elasticsearch/test/**/*</include>
<include>org/elasticsearch/bootstrap/BootstrapForTesting.class</include>
<include>org/elasticsearch/bootstrap/BootstrapForTesting$*.class</include>
<include>org/elasticsearch/common/cli/CliToolTestCase.class</include>
<include>org/elasticsearch/common/cli/CliToolTestCase$*.class</include>
<include>org/elasticsearch/cluster/MockInternalClusterInfoService.class</include>
<include>org/elasticsearch/cluster/MockInternalClusterInfoService$*.class</include>
<include>org/elasticsearch/cluster/routing/TestShardRouting.class</include>
<include>org/elasticsearch/cluster/routing/TestShardRouting$*.class</include>
<include>org/elasticsearch/index/MockEngineFactoryPlugin.class</include>
<include>org/elasticsearch/search/MockSearchService.class</include>
<include>org/elasticsearch/search/MockSearchService$*.class</include>
<include>org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.class</include>
<include>org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.class</include>
<include>org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams$*.class</include>
<include>org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.class</include>
<include>org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams$*.class</include>
<include>org/elasticsearch/search/aggregations/bucket/script/TestScript.class</include>
<include>org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.class</include>
<include>org/elasticsearch/percolator/PercolatorTestUtil.class</include>
<include>org/elasticsearch/cache/recycler/MockPageCacheRecycler.class</include>
<include>org/elasticsearch/cache/recycler/MockPageCacheRecycler$*.class</include>
<include>org/elasticsearch/common/util/MockBigArrays.class</include>
<include>org/elasticsearch/common/util/MockBigArrays$*.class</include>
<include>org/elasticsearch/node/NodeMocksPlugin.class</include>
<include>org/elasticsearch/node/MockNode.class</include>
<include>org/elasticsearch/common/io/PathUtilsForTesting.class</include>
</includes>
<excludes>
<!-- unit tests for yaml suite parser & rest spec parser need to be excluded -->
<exclude>org/elasticsearch/test/rest/test/**/*</exclude>
<!-- unit tests for test framework classes-->
<exclude>org/elasticsearch/test/test/**/*</exclude>
</excludes>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
<!-- Don't run the license checker in core -->
<id>check-license</id>
<phase>none</phase>
</execution>
</executions>
</plugin>
</plugins>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<configuration>
<excludes>
<exclude>org/apache/lucene/**</exclude>
</excludes>
</configuration>
</plugin>
<plugin>
<groupId>com.mycila</groupId>
<artifactId>license-maven-plugin</artifactId>
<configuration>
<excludes>
<!-- Guice -->
<exclude>src/main/java/org/elasticsearch/common/inject/**</exclude>
<!-- Forks of Lucene classes -->
<exclude>src/main/java/org/apache/lucene/**/X*.java</exclude>
<!-- netty pipelining -->
<exclude>src/main/java/org/elasticsearch/http/netty/pipelining/**</exclude>
<!-- Guava -->
<exclude>src/main/java/org/elasticsearch/common/network/InetAddresses.java</exclude>
<exclude>src/test/java/org/elasticsearch/common/network/InetAddressesTests.java</exclude>
<exclude>src/test/java/org/elasticsearch/common/collect/EvictingQueueTests.java</exclude>
<!-- Joda -->
<exclude>src/main/java/org/joda/time/base/BaseDateTime.java</exclude>
<exclude>src/main/java/org/joda/time/format/StrictISODateTimeFormat.java</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</pluginManagement>
</build>
<profiles>
<!-- license profile, to generate third party license file -->
<profile>
<id>license</id>
<activation>
<property>
<name>license.generation</name>
<value>true</value>
</property>
</activation>
<!-- not including license-maven-plugin is sufficient to expose default license -->
</profile>
</profiles>
</project>

@@ -24,32 +24,24 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CharFilterFactoryFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenFilterFactoryFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.index.analysis.TokenizerFactoryFactory;
import org.elasticsearch.index.analysis.*;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.IndicesAnalysisService;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -63,17 +55,15 @@ import java.util.List;
public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRequest, AnalyzeResponse> {
private final IndicesService indicesService;
private final IndicesAnalysisService indicesAnalysisService;
private static final Settings DEFAULT_SETTINGS = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
private final Environment environment;
@Inject
public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, IndicesAnalysisService indicesAnalysisService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
IndicesService indicesService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Environment environment) {
super(settings, AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, AnalyzeRequest::new, ThreadPool.Names.INDEX);
this.indicesService = indicesService;
this.indicesAnalysisService = indicesAnalysisService;
this.environment = environment;
}
@Override
@@ -105,53 +95,69 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
@Override
protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) {
IndexService indexService = null;
if (shardId != null) {
indexService = indicesService.indexServiceSafe(shardId.getIndex());
}
Analyzer analyzer = null;
boolean closeAnalyzer = false;
String field = null;
if (request.field() != null) {
if (indexService == null) {
throw new IllegalArgumentException("No index provided, and trying to analyze based on a specific field which requires the index parameter");
try {
final IndexService indexService;
if (shardId != null) {
indexService = indicesService.indexServiceSafe(shardId.getIndex());
} else {
indexService = null;
}
MappedFieldType fieldType = indexService.mapperService().smartNameFieldType(request.field());
if (fieldType != null) {
if (fieldType.isNumeric()) {
throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
String field = null;
Analyzer analyzer = null;
if (request.field() != null) {
if (indexService == null) {
throw new IllegalArgumentException("No index provided, and trying to analyze based on a specific field which requires the index parameter");
}
MappedFieldType fieldType = indexService.mapperService().smartNameFieldType(request.field());
if (fieldType != null) {
if (fieldType.isNumeric()) {
throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields");
}
analyzer = fieldType.indexAnalyzer();
field = fieldType.names().indexName();
}
analyzer = fieldType.indexAnalyzer();
field = fieldType.names().indexName();
}
}
if (field == null) {
if (indexService != null) {
field = indexService.queryParserService().defaultField();
} else {
field = AllFieldMapper.NAME;
if (field == null) {
if (indexService != null) {
field = indexService.queryParserService().defaultField();
} else {
field = AllFieldMapper.NAME;
}
}
final AnalysisRegistry analysisRegistry = indicesService.getAnalysis();
return analyze(request, field, analyzer, indexService != null ? indexService.analysisService() : null, analysisRegistry, environment);
} catch (IOException e) {
throw new ElasticsearchException("analysis failed", e);
}
}
public static AnalyzeResponse analyze(AnalyzeRequest request, String field, Analyzer analyzer, AnalysisService analysisService, AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
boolean closeAnalyzer = false;
if (analyzer == null && request.analyzer() != null) {
if (indexService == null) {
analyzer = indicesAnalysisService.analyzer(request.analyzer());
if (analysisService == null) {
analyzer = analysisRegistry.getAnalyzer(request.analyzer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find global analyzer [" + request.analyzer() + "]");
}
} else {
analyzer = indexService.analysisService().analyzer(request.analyzer());
}
if (analyzer == null) {
throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
analyzer = analysisService.analyzer(request.analyzer());
if (analyzer == null) {
throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]");
}
}
} else if (request.tokenizer() != null) {
TokenizerFactory tokenizerFactory;
if (indexService == null) {
TokenizerFactoryFactory tokenizerFactoryFactory = indicesAnalysisService.tokenizerFactoryFactory(request.tokenizer());
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(request.tokenizer());
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]");
}
tokenizerFactory = tokenizerFactoryFactory.create(request.tokenizer(), DEFAULT_SETTINGS);
tokenizerFactory = tokenizerFactoryFactory.get(environment, request.tokenizer());
} else {
tokenizerFactory = indexService.analysisService().tokenizer(request.tokenizer());
tokenizerFactory = analysisService.tokenizer(request.tokenizer());
if (tokenizerFactory == null) {
throw new IllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]");
}
@@ -162,14 +168,14 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().length];
for (int i = 0; i < request.tokenFilters().length; i++) {
String tokenFilterName = request.tokenFilters()[i];
if (indexService == null) {
TokenFilterFactoryFactory tokenFilterFactoryFactory = indicesAnalysisService.tokenFilterFactoryFactory(tokenFilterName);
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilterName);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.create(tokenFilterName, DEFAULT_SETTINGS);
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilterName);
} else {
tokenFilterFactories[i] = indexService.analysisService().tokenFilter(tokenFilterName);
tokenFilterFactories[i] = analysisService.tokenFilter(tokenFilterName);
if (tokenFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
}
@@ -185,20 +191,20 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
charFilterFactories = new CharFilterFactory[request.charFilters().length];
for (int i = 0; i < request.charFilters().length; i++) {
String charFilterName = request.charFilters()[i];
if (indexService == null) {
CharFilterFactoryFactory charFilterFactoryFactory = indicesAnalysisService.charFilterFactoryFactory(charFilterName);
if (analysisService == null) {
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilterName);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilterName + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.create(charFilterName, DEFAULT_SETTINGS);
charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilterName);
} else {
charFilterFactories[i] = indexService.analysisService().charFilter(charFilterName);
charFilterFactories[i] = analysisService.charFilter(charFilterName);
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token char under [" + charFilterName + "]");
throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]");
}
}
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token char under [" + charFilterName + "]");
throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]");
}
}
}
@@ -206,10 +212,10 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
analyzer = new CustomAnalyzer(tokenizerFactory, charFilterFactories, tokenFilterFactories);
closeAnalyzer = true;
} else if (analyzer == null) {
if (indexService == null) {
analyzer = indicesAnalysisService.analyzer("standard");
if (analysisService == null) {
analyzer = analysisRegistry.getAnalyzer("standard");
} else {
analyzer = indexService.analysisService().defaultIndexAnalyzer();
analyzer = analysisService.defaultIndexAnalyzer();
}
}
if (analyzer == null) {

@@ -22,9 +22,9 @@ package org.elasticsearch.action.admin.indices.validate.query;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
@@ -34,7 +34,7 @@ import java.io.IOException;
*/
public class ShardValidateQueryRequest extends BroadcastShardRequest {
private BytesReference source;
private QueryBuilder<?> query;
private String[] types = Strings.EMPTY_ARRAY;
private boolean explain;
private boolean rewrite;
@@ -49,7 +49,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest {
ShardValidateQueryRequest(ShardId shardId, @Nullable String[] filteringAliases, ValidateQueryRequest request) {
super(shardId, request);
this.source = request.source();
this.query = request.query();
this.types = request.types();
this.explain = request.explain();
this.rewrite = request.rewrite();
@@ -57,8 +57,8 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest {
this.nowInMillis = request.nowInMillis;
}
public BytesReference source() {
return source;
public QueryBuilder<?> query() {
return query;
}
public String[] types() {
@@ -84,7 +84,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest {
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
source = in.readBytesReference();
query = in.readQuery();
int typesSize = in.readVInt();
if (typesSize > 0) {
@@ -109,7 +109,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest {
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBytesReference(source);
out.writeQuery(query);
out.writeVInt(types.length);
for (String type : types) {

@@ -36,6 +36,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
@@ -43,7 +44,6 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.script.ScriptService;
@@ -178,9 +178,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
);
SearchContext.setCurrent(searchContext);
try {
if (request.source() != null && request.source().length() > 0) {
searchContext.parsedQuery(queryParserService.parseTopLevelQuery(request.source()));
}
searchContext.parsedQuery(queryParserService.toQuery(request.query()));
searchContext.preProcess();
valid = true;

@@ -19,34 +19,27 @@
package org.elasticsearch.action.admin.indices.validate.query;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.QuerySourceBuilder;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
/**
* A request to validate a specific query.
* <p>
* The request requires the query source to be set either using {@link #source(QuerySourceBuilder)},
* or {@link #source(byte[])}.
* The request requires the query to be set using {@link #query(QueryBuilder)}
*/
public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest> {
private BytesReference source;
private QueryBuilder<?> query = new MatchAllQueryBuilder();
private boolean explain;
private boolean rewrite;
@@ -71,67 +64,21 @@ public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest>
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
if (query == null) {
validationException = ValidateActions.addValidationError("query cannot be null", validationException);
}
return validationException;
}
/**
* The source to execute.
* The query to validate.
*/
public BytesReference source() {
return source;
public QueryBuilder<?> query() {
return query;
}
public ValidateQueryRequest source(QuerySourceBuilder sourceBuilder) {
this.source = sourceBuilder.buildAsBytes(Requests.CONTENT_TYPE);
return this;
}
/**
* The source to execute in the form of a map.
*/
public ValidateQueryRequest source(Map source) {
try {
XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE);
builder.map(source);
return source(builder);
} catch (IOException e) {
throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e);
}
}
public ValidateQueryRequest source(XContentBuilder builder) {
this.source = builder.bytes();
return this;
}
/**
* The query source to validate. It is preferable to use either {@link #source(byte[])}
* or {@link #source(QuerySourceBuilder)}.
*/
public ValidateQueryRequest source(String source) {
this.source = new BytesArray(source);
return this;
}
/**
* The source to validate.
*/
public ValidateQueryRequest source(byte[] source) {
return source(source, 0, source.length);
}
/**
* The source to validate.
*/
public ValidateQueryRequest source(byte[] source, int offset, int length) {
return source(new BytesArray(source, offset, length));
}
/**
* The source to validate.
*/
public ValidateQueryRequest source(BytesReference source) {
this.source = source;
public ValidateQueryRequest query(QueryBuilder<?> query) {
this.query = query;
return this;
}
@@ -181,9 +128,7 @@ public class ValidateQueryRequest extends BroadcastRequest<ValidateQueryRequest>
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
source = in.readBytesReference();
query = in.readQuery();
int typesSize = in.readVInt();
if (typesSize > 0) {
types = new String[typesSize];
@@ -191,7 +136,6 @@
types[i] = in.readString();
}
}
explain = in.readBoolean();
rewrite = in.readBoolean();
}
@@ -199,27 +143,18 @@
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBytesReference(source);
out.writeQuery(query);
out.writeVInt(types.length);
for (String type : types) {
out.writeString(type);
}
out.writeBoolean(explain);
out.writeBoolean(rewrite);
}
@Override
public String toString() {
String sSource = "_na_";
try {
sSource = XContentHelper.convertToJson(source, false);
} catch (Exception e) {
// ignore
}
return "[" + Arrays.toString(indices) + "]" + Arrays.toString(types) + ", source[" + sSource + "], explain:" + explain +
return "[" + Arrays.toString(indices) + "]" + Arrays.toString(types) + ", query[" + query + "], explain:" + explain +
", rewrite:" + rewrite;
}
}

@@ -19,10 +19,8 @@
package org.elasticsearch.action.admin.indices.validate.query;
import org.elasticsearch.action.support.QuerySourceBuilder;
import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.query.QueryBuilder;
/**
@@ -30,8 +28,6 @@ import org.elasticsearch.index.query.QueryBuilder;
*/
public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder<ValidateQueryRequest, ValidateQueryResponse, ValidateQueryRequestBuilder> {
private QuerySourceBuilder sourceBuilder;
public ValidateQueryRequestBuilder(ElasticsearchClient client, ValidateQueryAction action) {
super(client, action, new ValidateQueryRequest());
}
@@ -45,32 +41,12 @@
}
/**
* The query source to validate.
* The query to validate.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public ValidateQueryRequestBuilder setQuery(QueryBuilder queryBuilder) {
sourceBuilder().setQuery(queryBuilder);
return this;
}
/**
* The source to validate.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public ValidateQueryRequestBuilder setSource(BytesReference source) {
request().source(source);
return this;
}
/**
* The source to validate.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public ValidateQueryRequestBuilder setSource(byte[] source) {
request.source(source);
request.query(queryBuilder);
return this;
}
@@ -91,19 +67,4 @@
request.rewrite(rewrite);
return this;
}
@Override
protected ValidateQueryRequest beforeExecute(ValidateQueryRequest request) {
if (sourceBuilder != null) {
request.source(sourceBuilder);
}
return request;
}
private QuerySourceBuilder sourceBuilder() {
if (sourceBuilder == null) {
sourceBuilder = new QuerySourceBuilder();
}
return sourceBuilder;
}
}

@@ -108,23 +108,6 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetReq
return this;
}
/**
* Should the source be transformed using the script to used at index time
* (if any)? Note that calling this without having called setFetchSource
* will automatically turn on source fetching.
*
* @return this for chaining
*/
public GetRequestBuilder setTransformSource(boolean transform) {
FetchSourceContext context = request.fetchSourceContext();
if (context == null) {
context = new FetchSourceContext(true);
request.fetchSourceContext(context);
}
context.transformSource(transform);
return this;
}
/**
* Indicate that _source should be returned, with an "include" and/or "exclude" set which can include simple wildcard
* elements.

@@ -1,67 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.QueryBuilder;
import java.io.IOException;
public class QuerySourceBuilder extends ToXContentToBytes {
private QueryBuilder queryBuilder;
private BytesReference queryBinary;
public QuerySourceBuilder setQuery(QueryBuilder query) {
this.queryBuilder = query;
return this;
}
public QuerySourceBuilder setQuery(BytesReference queryBinary) {
this.queryBinary = queryBinary;
return this;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
innerToXContent(builder, params);
builder.endObject();
return builder;
}
public void innerToXContent(XContentBuilder builder, Params params) throws IOException {
if (queryBuilder != null) {
builder.field("query");
queryBuilder.toXContent(builder, params);
}
if (queryBinary != null) {
if (XContentFactory.xContentType(queryBinary) == builder.contentType()) {
builder.rawField("query", queryBinary);
} else {
builder.field("query_binary", queryBinary);
}
}
}
}
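Before its removal, this builder wrapped whatever query it was given in a top-level "query" object. A rough sketch of what it serialized (field name and value are made up):

QuerySourceBuilder source = new QuerySourceBuilder()
        .setQuery(QueryBuilders.termQuery("user", "kimchy"));
// source.toXContent(...) emitted roughly: { "query": { "term": { "user": "kimchy" } } }
// Callers now hand the QueryBuilder to the request directly instead.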


@@ -19,7 +19,6 @@
package org.elasticsearch.action.termvectors;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocumentRequest;
@@ -211,7 +210,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
public String id() {
return id;
}
/**
* Sets the id of the document the term vector is requested for.
*/
@@ -651,7 +650,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
if (e.getValue() instanceof String) {
mapStrStr.put(e.getKey(), (String) e.getValue());
} else {
throw new ElasticsearchException("expecting the analyzer at [{}] to be a String, but found [{}] instead", e.getKey(), e.getValue().getClass());
throw new ElasticsearchParseException("expecting the analyzer at [{}] to be a String, but found [{}] instead", e.getKey(), e.getValue().getClass());
}
}
return mapStrStr;
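The narrower exception type surfaces a bad per-field analyzer map as a parse error. A hedged sketch of the request-side input this validation guards, assuming the builder's setPerFieldAnalyzer method (index, type, id, and field names are illustrative):

Map<String, String> perFieldAnalyzer = new HashMap<>();
perFieldAnalyzer.put("title", "whitespace");   // field -> analyzer name; values must be Strings
TermVectorsResponse tv = client.prepareTermVectors("idx", "type", "1")
        .setPerFieldAnalyzer(perFieldAnalyzer)
        .get();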


@@ -32,7 +32,6 @@ import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService;
import java.util.Collections;
import java.util.Set;
@@ -49,13 +48,9 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
* are restored from a repository.
*/
public class MetaDataIndexUpgradeService extends AbstractComponent {
private final ScriptService scriptService;
@Inject
public MetaDataIndexUpgradeService(Settings settings, ScriptService scriptService) {
public MetaDataIndexUpgradeService(Settings settings) {
super(settings);
this.scriptService = scriptService;
}
/**
@@ -221,9 +216,8 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings, Collections.EMPTY_LIST);
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.EMPTY_MAP);
try (AnalysisService analysisService = new FakeAnalysisService(indexSettings)) {
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService, scriptService)) {
try (MapperService mapperService = new MapperService(indexSettings, analysisService, similarityService)) {
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
MappingMetaData mappingMetaData = cursor.value;
mapperService.merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
@@ -257,7 +251,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
};
public FakeAnalysisService(IndexSettings indexSettings) {
super(indexSettings);
super(indexSettings, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
}
@Override


@@ -466,8 +466,8 @@ public class DiskThresholdDecider extends AllocationDecider {
// If this node is already above the high threshold, the shard cannot remain (get it off!)
final double freeDiskPercentage = usage.getFreeDiskAsPercentage();
final long freeBytes = usage.getFreeBytes();
if (logger.isDebugEnabled()) {
logger.debug("node [{}] has {}% free disk ({} bytes)", node.nodeId(), freeDiskPercentage, freeBytes);
if (logger.isTraceEnabled()) {
logger.trace("node [{}] has {}% free disk ({} bytes)", node.nodeId(), freeDiskPercentage, freeBytes);
}
if (dataPath == null || usage.getPath().equals(dataPath) == false) {
return allocation.decision(Decision.YES, NAME, "shard is not allocated on the most utilized disk");


@@ -237,7 +237,7 @@ public class Cache<K, V> {
}).get();
}
} catch (ExecutionException | InterruptedException e) {
throw new IllegalStateException("future should be a completedFuture for which get should not throw", e);
throw new IllegalStateException(e);
}
}
return Tuple.tuple(entry, existing);


@@ -20,8 +20,11 @@
package org.elasticsearch.index;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.query.QueryCache;
@@ -41,6 +44,7 @@ import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import java.io.IOException;
import java.util.*;
import java.util.function.BiFunction;
import java.util.function.Consumer;
@@ -69,6 +73,7 @@ public final class IndexModule extends AbstractModule {
private final IndexSettings indexSettings;
private final IndexStoreConfig indexStoreConfig;
private final IndicesQueryCache indicesQueryCache;
private final AnalysisRegistry analysisRegistry;
// pkg private so tests can mock
Class<? extends EngineFactory> engineFactoryImpl = InternalEngineFactory.class;
private SetOnce<IndexSearcherWrapperFactory> indexSearcherWrapper = new SetOnce<>();
@@ -81,11 +86,12 @@
private IndicesWarmer indicesWarmer;
public IndexModule(IndexSettings indexSettings, IndexStoreConfig indexStoreConfig, IndicesQueryCache indicesQueryCache, IndicesWarmer warmer) {
public IndexModule(IndexSettings indexSettings, IndexStoreConfig indexStoreConfig, IndicesQueryCache indicesQueryCache, IndicesWarmer warmer, AnalysisRegistry analysisRegistry) {
this.indexStoreConfig = indexStoreConfig;
this.indexSettings = indexSettings;
this.indicesQueryCache = indicesQueryCache;
this.indicesWarmer = warmer;
this.analysisRegistry = analysisRegistry;
registerQueryCache(INDEX_QUERY_CACHE, IndexQueryCache::new);
registerQueryCache(NONE_QUERY_CACHE, (a, b) -> new NoneQueryCache(a));
}
@@ -216,6 +222,11 @@
@Override
protected void configure() {
try {
bind(AnalysisService.class).toInstance(analysisRegistry.build(indexSettings));
} catch (IOException e) {
throw new ElasticsearchException("can't create analysis service", e);
}
bind(EngineFactory.class).to(engineFactoryImpl).asEagerSingleton();
bind(IndexSearcherWrapperFactory.class).toInstance(indexSearcherWrapper.get() == null ? (shard) -> null : indexSearcherWrapper.get());
bind(IndexEventListener.class).toInstance(freeze());
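The module now carries the node-level AnalysisRegistry and binds a per-index AnalysisService built from it. A rough wiring sketch using the constructors shown in this diff (variable names are illustrative; the real call sites live in node bootstrap code outside this diff):

AnalysisRegistry analysisRegistry = new AnalysisRegistry(hunspellService, environment);
IndexModule indexModule = new IndexModule(indexSettings, indexStoreConfig,
        indicesQueryCache, indicesWarmer, analysisRegistry);
// configure() then binds AnalysisService to analysisRegistry.build(indexSettings)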


@@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@@ -32,8 +31,7 @@ import org.elasticsearch.index.IndexSettings;
public class ASCIIFoldingTokenFilterFactory extends AbstractTokenFilterFactory {
private final boolean preserveOriginal;
@Inject
public ASCIIFoldingTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ASCIIFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
preserveOriginal = settings.getAsBoolean("preserve_original", false);
}
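With the Guice annotations gone, the constructor has exactly the shape AnalysisModule.AnalysisProvider expects, so the factory can be registered as a plain constructor reference, as the new AnalysisRegistry below does:

// (IndexSettings, Environment, String, Settings) -> TokenFilterFactory
tokenFilters.put("asciifolding", ASCIIFoldingTokenFilterFactory::new);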


@@ -1,507 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Scopes;
import org.elasticsearch.common.inject.assistedinject.FactoryProvider;
import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
import org.elasticsearch.indices.analysis.IndicesAnalysisService;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Objects;
/**
*
*/
public class AnalysisModule extends AbstractModule {
public static class AnalysisBinderProcessor {
public void processCharFilters(CharFiltersBindings charFiltersBindings) {
}
public static class CharFiltersBindings {
private final Map<String, Class<? extends CharFilterFactory>> charFilters = new HashMap<>();
public CharFiltersBindings() {
}
public void processCharFilter(String name, Class<? extends CharFilterFactory> charFilterFactory) {
charFilters.put(name, charFilterFactory);
}
}
public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
}
public static class TokenFiltersBindings {
private final Map<String, Class<? extends TokenFilterFactory>> tokenFilters = new HashMap<>();
public TokenFiltersBindings() {
}
public void processTokenFilter(String name, Class<? extends TokenFilterFactory> tokenFilterFactory) {
tokenFilters.put(name, tokenFilterFactory);
}
}
public void processTokenizers(TokenizersBindings tokenizersBindings) {
}
public static class TokenizersBindings {
private final Map<String, Class<? extends TokenizerFactory>> tokenizers = new HashMap<>();
public TokenizersBindings() {
}
public void processTokenizer(String name, Class<? extends TokenizerFactory> tokenizerFactory) {
tokenizers.put(name, tokenizerFactory);
}
}
public void processAnalyzers(AnalyzersBindings analyzersBindings) {
}
public static class AnalyzersBindings {
private final Map<String, Class<? extends AnalyzerProvider>> analyzers = new HashMap<>();
public AnalyzersBindings() {
}
public void processAnalyzer(String name, Class<? extends AnalyzerProvider> analyzerProvider) {
analyzers.put(name, analyzerProvider);
}
}
}
private final Settings settings;
private final IndicesAnalysisService indicesAnalysisService;
private final LinkedList<AnalysisBinderProcessor> processors = new LinkedList<>();
private final Map<String, Class<? extends CharFilterFactory>> charFilters = new HashMap<>();
private final Map<String, Class<? extends TokenFilterFactory>> tokenFilters = new HashMap<>();
private final Map<String, Class<? extends TokenizerFactory>> tokenizers = new HashMap<>();
private final Map<String, Class<? extends AnalyzerProvider>> analyzers = new HashMap<>();
public AnalysisModule(Settings settings, IndicesAnalysisService indicesAnalysisService) {
Objects.requireNonNull(indicesAnalysisService);
this.settings = settings;
this.indicesAnalysisService = indicesAnalysisService;
processors.add(new DefaultProcessor());
try {
processors.add(new ExtendedProcessor());
} catch (Throwable t) {
// ignore. no extended ones
}
}
public AnalysisModule addProcessor(AnalysisBinderProcessor processor) {
processors.addFirst(processor);
return this;
}
public AnalysisModule addCharFilter(String name, Class<? extends CharFilterFactory> charFilter) {
charFilters.put(name, charFilter);
return this;
}
public AnalysisModule addTokenFilter(String name, Class<? extends TokenFilterFactory> tokenFilter) {
tokenFilters.put(name, tokenFilter);
return this;
}
public AnalysisModule addTokenizer(String name, Class<? extends TokenizerFactory> tokenizer) {
tokenizers.put(name, tokenizer);
return this;
}
public AnalysisModule addAnalyzer(String name, Class<? extends AnalyzerProvider> analyzer) {
analyzers.put(name, analyzer);
return this;
}
@Override
protected void configure() {
MapBinder<String, CharFilterFactoryFactory> charFilterBinder
= MapBinder.newMapBinder(binder(), String.class, CharFilterFactoryFactory.class);
// CHAR FILTERS
AnalysisBinderProcessor.CharFiltersBindings charFiltersBindings = new AnalysisBinderProcessor.CharFiltersBindings();
for (AnalysisBinderProcessor processor : processors) {
processor.processCharFilters(charFiltersBindings);
}
charFiltersBindings.charFilters.putAll(charFilters);
Map<String, Settings> charFiltersSettings = settings.getGroups("index.analysis.char_filter");
for (Map.Entry<String, Settings> entry : charFiltersSettings.entrySet()) {
String charFilterName = entry.getKey();
Settings charFilterSettings = entry.getValue();
String typeName = charFilterSettings.get("type");
if (typeName == null) {
throw new IllegalArgumentException("CharFilter [" + charFilterName + "] must have a type associated with it");
}
Class<? extends CharFilterFactory> type = charFiltersBindings.charFilters.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown CharFilter type [" + typeName + "] for [" + charFilterName + "]");
}
charFilterBinder.addBinding(charFilterName).toProvider(FactoryProvider.newFactory(CharFilterFactoryFactory.class, type)).in(Scopes.SINGLETON);
}
// go over the char filters in the bindings and register the ones that are not configured
for (Map.Entry<String, Class<? extends CharFilterFactory>> entry : charFiltersBindings.charFilters.entrySet()) {
String charFilterName = entry.getKey();
Class<? extends CharFilterFactory> clazz = entry.getValue();
// we don't want to re-register one that already exists
if (charFiltersSettings.containsKey(charFilterName)) {
continue;
}
// if it requires settings, don't register it; we know the default has no settings
if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
continue;
}
// register if it's not builtin
if (indicesAnalysisService.hasCharFilter(charFilterName) == false) {
charFilterBinder.addBinding(charFilterName).toProvider(FactoryProvider.newFactory(CharFilterFactoryFactory.class, clazz)).in(Scopes.SINGLETON);
}
}
// TOKEN FILTERS
MapBinder<String, TokenFilterFactoryFactory> tokenFilterBinder
= MapBinder.newMapBinder(binder(), String.class, TokenFilterFactoryFactory.class);
// initial default bindings
AnalysisBinderProcessor.TokenFiltersBindings tokenFiltersBindings = new AnalysisBinderProcessor.TokenFiltersBindings();
for (AnalysisBinderProcessor processor : processors) {
processor.processTokenFilters(tokenFiltersBindings);
}
tokenFiltersBindings.tokenFilters.putAll(tokenFilters);
Map<String, Settings> tokenFiltersSettings = settings.getGroups("index.analysis.filter");
for (Map.Entry<String, Settings> entry : tokenFiltersSettings.entrySet()) {
String tokenFilterName = entry.getKey();
Settings tokenFilterSettings = entry.getValue();
String typeName = tokenFilterSettings.get("type");
if (typeName == null) {
throw new IllegalArgumentException("TokenFilter [" + tokenFilterName + "] must have a type associated with it");
}
Class<? extends TokenFilterFactory> type = tokenFiltersBindings.tokenFilters.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown TokenFilter type [" + typeName + "] for [" + tokenFilterName + "]");
}
tokenFilterBinder.addBinding(tokenFilterName).toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, type)).in(Scopes.SINGLETON);
}
// go over the filters in the bindings and register the ones that are not configured
for (Map.Entry<String, Class<? extends TokenFilterFactory>> entry : tokenFiltersBindings.tokenFilters.entrySet()) {
String tokenFilterName = entry.getKey();
Class<? extends TokenFilterFactory> clazz = entry.getValue();
// we don't want to re-register one that already exists
if (tokenFiltersSettings.containsKey(tokenFilterName)) {
continue;
}
// if it requires settings, don't register it; we know the default has no settings
if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
continue;
}
// register if it's not builtin
if (indicesAnalysisService.hasTokenFilter(tokenFilterName) == false) {
tokenFilterBinder.addBinding(tokenFilterName).toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, clazz)).in(Scopes.SINGLETON);
}
}
// TOKENIZER
MapBinder<String, TokenizerFactoryFactory> tokenizerBinder
= MapBinder.newMapBinder(binder(), String.class, TokenizerFactoryFactory.class);
// initial default bindings
AnalysisBinderProcessor.TokenizersBindings tokenizersBindings = new AnalysisBinderProcessor.TokenizersBindings();
for (AnalysisBinderProcessor processor : processors) {
processor.processTokenizers(tokenizersBindings);
}
tokenizersBindings.tokenizers.putAll(tokenizers);
Map<String, Settings> tokenizersSettings = settings.getGroups("index.analysis.tokenizer");
for (Map.Entry<String, Settings> entry : tokenizersSettings.entrySet()) {
String tokenizerName = entry.getKey();
Settings tokenizerSettings = entry.getValue();
String typeName = tokenizerSettings.get("type");
if (typeName == null) {
throw new IllegalArgumentException("Tokenizer [" + tokenizerName + "] must have a type associated with it");
}
Class<? extends TokenizerFactory> type = tokenizersBindings.tokenizers.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown Tokenizer type [" + typeName + "] for [" + tokenizerName + "]");
}
tokenizerBinder.addBinding(tokenizerName).toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, type)).in(Scopes.SINGLETON);
}
// go over the tokenizers in the bindings and register the ones that are not configured
for (Map.Entry<String, Class<? extends TokenizerFactory>> entry : tokenizersBindings.tokenizers.entrySet()) {
String tokenizerName = entry.getKey();
Class<? extends TokenizerFactory> clazz = entry.getValue();
// we don't want to re-register one that already exists
if (tokenizersSettings.containsKey(tokenizerName)) {
continue;
}
// if it requires settings, don't register it; we know the default has no settings
if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
continue;
}
// register if it's not builtin
if (indicesAnalysisService.hasTokenizer(tokenizerName) == false) {
tokenizerBinder.addBinding(tokenizerName).toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, clazz)).in(Scopes.SINGLETON);
}
}
// ANALYZER
MapBinder<String, AnalyzerProviderFactory> analyzerBinder
= MapBinder.newMapBinder(binder(), String.class, AnalyzerProviderFactory.class);
// initial default bindings
AnalysisBinderProcessor.AnalyzersBindings analyzersBindings = new AnalysisBinderProcessor.AnalyzersBindings();
for (AnalysisBinderProcessor processor : processors) {
processor.processAnalyzers(analyzersBindings);
}
analyzersBindings.analyzers.putAll(analyzers);
Map<String, Settings> analyzersSettings = settings.getGroups("index.analysis.analyzer");
for (Map.Entry<String, Settings> entry : analyzersSettings.entrySet()) {
String analyzerName = entry.getKey();
Settings analyzerSettings = entry.getValue();
String typeName = analyzerSettings.get("type");
Class<? extends AnalyzerProvider> type;
if (typeName == null) {
if (analyzerSettings.get("tokenizer") != null) {
// custom analyzer, need to add it
type = CustomAnalyzerProvider.class;
} else {
throw new IllegalArgumentException("Analyzer [" + analyzerName + "] must have a type associated with it");
}
} else if (typeName.equals("custom")) {
type = CustomAnalyzerProvider.class;
} else {
type = analyzersBindings.analyzers.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown Analyzer type [" + typeName + "] for [" + analyzerName + "]");
}
}
analyzerBinder.addBinding(analyzerName).toProvider(FactoryProvider.newFactory(AnalyzerProviderFactory.class, type)).in(Scopes.SINGLETON);
}
// go over the analyzers in the bindings and register the ones that are not configured
for (Map.Entry<String, Class<? extends AnalyzerProvider>> entry : analyzersBindings.analyzers.entrySet()) {
String analyzerName = entry.getKey();
Class<? extends AnalyzerProvider> clazz = entry.getValue();
// we don't want to re-register one that already exists
if (analyzersSettings.containsKey(analyzerName)) {
continue;
}
// if it requires settings, don't register it; we know the default has no settings
if (clazz.getAnnotation(AnalysisSettingsRequired.class) != null) {
continue;
}
// register if it's not builtin
if (indicesAnalysisService.hasAnalyzer(analyzerName) == false) {
analyzerBinder.addBinding(analyzerName).toProvider(FactoryProvider.newFactory(AnalyzerProviderFactory.class, clazz)).in(Scopes.SINGLETON);
}
}
bind(AnalysisService.class).in(Scopes.SINGLETON);
}
private static class DefaultProcessor extends AnalysisBinderProcessor {
@Override
public void processCharFilters(CharFiltersBindings charFiltersBindings) {
charFiltersBindings.processCharFilter("html_strip", HtmlStripCharFilterFactory.class);
charFiltersBindings.processCharFilter("pattern_replace", PatternReplaceCharFilterFactory.class);
}
@Override
public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
tokenFiltersBindings.processTokenFilter("stop", StopTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("reverse", ReverseTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("asciifolding", ASCIIFoldingTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("length", LengthTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("lowercase", LowerCaseTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("uppercase", UpperCaseTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("porter_stem", PorterStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("kstem", KStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("standard", StandardTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("nGram", NGramTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("ngram", NGramTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("edgeNGram", EdgeNGramTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("edge_ngram", EdgeNGramTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("shingle", ShingleTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("unique", UniqueTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("truncate", TruncateTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("trim", TrimTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("limit", LimitTokenCountFilterFactory.class);
tokenFiltersBindings.processTokenFilter("common_grams", CommonGramsTokenFilterFactory.class);
}
@Override
public void processTokenizers(TokenizersBindings tokenizersBindings) {
tokenizersBindings.processTokenizer("standard", StandardTokenizerFactory.class);
tokenizersBindings.processTokenizer("uax_url_email", UAX29URLEmailTokenizerFactory.class);
tokenizersBindings.processTokenizer("path_hierarchy", PathHierarchyTokenizerFactory.class);
tokenizersBindings.processTokenizer("keyword", KeywordTokenizerFactory.class);
tokenizersBindings.processTokenizer("letter", LetterTokenizerFactory.class);
tokenizersBindings.processTokenizer("lowercase", LowerCaseTokenizerFactory.class);
tokenizersBindings.processTokenizer("whitespace", WhitespaceTokenizerFactory.class);
tokenizersBindings.processTokenizer("nGram", NGramTokenizerFactory.class);
tokenizersBindings.processTokenizer("ngram", NGramTokenizerFactory.class);
tokenizersBindings.processTokenizer("edgeNGram", EdgeNGramTokenizerFactory.class);
tokenizersBindings.processTokenizer("edge_ngram", EdgeNGramTokenizerFactory.class);
}
@Override
public void processAnalyzers(AnalyzersBindings analyzersBindings) {
analyzersBindings.processAnalyzer("default", StandardAnalyzerProvider.class);
analyzersBindings.processAnalyzer("standard", StandardAnalyzerProvider.class);
analyzersBindings.processAnalyzer("standard_html_strip", StandardHtmlStripAnalyzerProvider.class);
analyzersBindings.processAnalyzer("simple", SimpleAnalyzerProvider.class);
analyzersBindings.processAnalyzer("stop", StopAnalyzerProvider.class);
analyzersBindings.processAnalyzer("whitespace", WhitespaceAnalyzerProvider.class);
analyzersBindings.processAnalyzer("keyword", KeywordAnalyzerProvider.class);
}
}
private static class ExtendedProcessor extends AnalysisBinderProcessor {
@Override
public void processCharFilters(CharFiltersBindings charFiltersBindings) {
charFiltersBindings.processCharFilter("mapping", MappingCharFilterFactory.class);
}
@Override
public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
tokenFiltersBindings.processTokenFilter("snowball", SnowballTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("stemmer", StemmerTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("word_delimiter", WordDelimiterTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("delimited_payload_filter", DelimitedPayloadTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("synonym", SynonymTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("elision", ElisionTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("keep", KeepWordFilterFactory.class);
tokenFiltersBindings.processTokenFilter("keep_types", KeepTypesFilterFactory.class);
tokenFiltersBindings.processTokenFilter("pattern_capture", PatternCaptureGroupTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("pattern_replace", PatternReplaceTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("dictionary_decompounder", DictionaryCompoundWordTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("hyphenation_decompounder", HyphenationCompoundWordTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("arabic_stem", ArabicStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("brazilian_stem", BrazilianStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("czech_stem", CzechStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("dutch_stem", DutchStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("french_stem", FrenchStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("german_stem", GermanStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("russian_stem", RussianStemTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("keyword_marker", KeywordMarkerTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("stemmer_override", StemmerOverrideTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("arabic_normalization", ArabicNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("german_normalization", GermanNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("hindi_normalization", HindiNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("indic_normalization", IndicNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("sorani_normalization", SoraniNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("persian_normalization", PersianNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("scandinavian_normalization", ScandinavianNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("scandinavian_folding", ScandinavianFoldingFilterFactory.class);
tokenFiltersBindings.processTokenFilter("serbian_normalization", SerbianNormalizationFilterFactory.class);
tokenFiltersBindings.processTokenFilter("hunspell", HunspellTokenFilterFactory.class);
tokenFiltersBindings.processTokenFilter("cjk_bigram", CJKBigramFilterFactory.class);
tokenFiltersBindings.processTokenFilter("cjk_width", CJKWidthFilterFactory.class);
tokenFiltersBindings.processTokenFilter("apostrophe", ApostropheFilterFactory.class);
tokenFiltersBindings.processTokenFilter("classic", ClassicFilterFactory.class);
tokenFiltersBindings.processTokenFilter("decimal_digit", DecimalDigitFilterFactory.class);
}
@Override
public void processTokenizers(TokenizersBindings tokenizersBindings) {
tokenizersBindings.processTokenizer("pattern", PatternTokenizerFactory.class);
tokenizersBindings.processTokenizer("classic", ClassicTokenizerFactory.class);
tokenizersBindings.processTokenizer("thai", ThaiTokenizerFactory.class);
}
@Override
public void processAnalyzers(AnalyzersBindings analyzersBindings) {
analyzersBindings.processAnalyzer("pattern", PatternAnalyzerProvider.class);
analyzersBindings.processAnalyzer("snowball", SnowballAnalyzerProvider.class);
analyzersBindings.processAnalyzer("arabic", ArabicAnalyzerProvider.class);
analyzersBindings.processAnalyzer("armenian", ArmenianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("basque", BasqueAnalyzerProvider.class);
analyzersBindings.processAnalyzer("brazilian", BrazilianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("bulgarian", BulgarianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("catalan", CatalanAnalyzerProvider.class);
analyzersBindings.processAnalyzer("chinese", ChineseAnalyzerProvider.class);
analyzersBindings.processAnalyzer("cjk", CjkAnalyzerProvider.class);
analyzersBindings.processAnalyzer("czech", CzechAnalyzerProvider.class);
analyzersBindings.processAnalyzer("danish", DanishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("dutch", DutchAnalyzerProvider.class);
analyzersBindings.processAnalyzer("english", EnglishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("finnish", FinnishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("french", FrenchAnalyzerProvider.class);
analyzersBindings.processAnalyzer("galician", GalicianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("german", GermanAnalyzerProvider.class);
analyzersBindings.processAnalyzer("greek", GreekAnalyzerProvider.class);
analyzersBindings.processAnalyzer("hindi", HindiAnalyzerProvider.class);
analyzersBindings.processAnalyzer("hungarian", HungarianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("indonesian", IndonesianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("irish", IrishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("italian", ItalianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("latvian", LatvianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("lithuanian", LithuanianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("norwegian", NorwegianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("persian", PersianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("portuguese", PortugueseAnalyzerProvider.class);
analyzersBindings.processAnalyzer("romanian", RomanianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("russian", RussianAnalyzerProvider.class);
analyzersBindings.processAnalyzer("sorani", SoraniAnalyzerProvider.class);
analyzersBindings.processAnalyzer("spanish", SpanishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("swedish", SwedishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("turkish", TurkishAnalyzerProvider.class);
analyzersBindings.processAnalyzer("thai", ThaiAnalyzerProvider.class);
}
}
}
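All of the MapBinder/FactoryProvider plumbing above is replaced by the AnalysisRegistry added below, which keeps plain maps of constructor references. A before/after sketch for registering a custom token filter (MyTokenFilterFactory is hypothetical):

// before: Guice module (deleted above); MyTokenFilterFactory is a hypothetical example
analysisModule.addTokenFilter("my_filter", MyTokenFilterFactory.class);
// after: registry (added below); the provider is just a constructor reference
tokenFilters.put("my_filter", MyTokenFilterFactory::new);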


@@ -0,0 +1,461 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
import org.elasticsearch.indices.analysis.*;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
/**
* An internal registry for tokenizers, token filters, char filters and analyzers.
* This class exists once per node and is used to create the per-index {@link AnalysisService} via {@link #build(IndexSettings)}.
*/
public final class AnalysisRegistry implements Closeable {
private final Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilters;
private final Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters;
private final Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizers;
private final Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzers;
private final Map<String, Analyzer> cachedAnalyzer = new ConcurrentHashMap<>();
private final PrebuiltAnalysis prebuiltAnalysis;
private final HunspellService hunspellService;
private final Environment environemnt;
public AnalysisRegistry(HunspellService hunspellService, Environment environment) {
this(hunspellService, environment, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
}
public AnalysisRegistry(HunspellService hunspellService, Environment environment,
Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilters,
Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters,
Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizers,
Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzers) {
prebuiltAnalysis = new PrebuiltAnalysis();
this.hunspellService = hunspellService;
this.environemnt = environment;
final Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilterBuilder = new HashMap<>(charFilters);
final Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilterBuilder = new HashMap<>(tokenFilters);
final Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizerBuilder = new HashMap<>(tokenizers);
final Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzerBuilder = new HashMap<>(analyzers);
registerBuiltInAnalyzer(analyzerBuilder);
registerBuiltInCharFilter(charFilterBuilder);
registerBuiltInTokenizer(tokenizerBuilder);
registerBuiltInTokenFilters(tokenFilterBuilder);
this.tokenFilters = Collections.unmodifiableMap(tokenFilterBuilder);
this.tokenizers = Collections.unmodifiableMap(tokenizerBuilder);
this.charFilters = Collections.unmodifiableMap(charFilterBuilder);
this.analyzers = Collections.unmodifiableMap(analyzerBuilder);
}
/**
* Returns a registered {@link TokenizerFactory} provider by name or <code>null</code> if the tokenizer was not registered
*/
public AnalysisModule.AnalysisProvider<TokenizerFactory> getTokenizerProvider(String tokenizer) {
return tokenizers.getOrDefault(tokenizer, this.prebuiltAnalysis.getTokenizerFactory(tokenizer));
}
/**
* Returns a registered {@link TokenFilterFactory} provider by name or <code>null</code> if the token filter was not registered
*/
public AnalysisModule.AnalysisProvider<TokenFilterFactory> getTokenFilterProvider(String tokenFilter) {
return tokenFilters.getOrDefault(tokenFilter, this.prebuiltAnalysis.getTokenFilterFactory(tokenFilter));
}
/**
* Returns a registered {@link CharFilterFactory} provider by name or <code>null</code> if the char filter was not registered
*/
public AnalysisModule.AnalysisProvider<CharFilterFactory> getCharFilterProvider(String charFilter) {
return charFilters.getOrDefault(charFilter, this.prebuiltAnalysis.getCharFilterFactory(charFilter));
}
/**
* Returns a registered {@link Analyzer} provider by name or <code>null</code> if the analyzer was not registered
*/
public Analyzer getAnalyzer(String analyzer) throws IOException {
AnalysisModule.AnalysisProvider<AnalyzerProvider> analyzerProvider = this.prebuiltAnalysis.getAnalyzerProvider(analyzer);
if (analyzerProvider == null) {
AnalysisModule.AnalysisProvider<AnalyzerProvider> provider = analyzers.get(analyzer);
return provider == null ? null : cachedAnalyzer.computeIfAbsent(analyzer, (key) -> {
try {
return provider.get(environemnt, key).get();
} catch (IOException ex) {
throw new ElasticsearchException("failed to load analyzer for name " + key, ex);
}
});
}
return analyzerProvider.get(environemnt, analyzer).get();
}
@Override
public void close() throws IOException {
try {
prebuiltAnalysis.close();
} finally {
IOUtils.close(cachedAnalyzer.values());
}
}
/**
* Creates an index-level {@link AnalysisService} from this registry using the given index settings
*/
public AnalysisService build(IndexSettings indexSettings) throws IOException {
final Map<String, Settings> charFiltersSettings = indexSettings.getSettings().getGroups("index.analysis.char_filter");
final Map<String, Settings> tokenFiltersSettings = indexSettings.getSettings().getGroups("index.analysis.filter");
final Map<String, Settings> tokenizersSettings = indexSettings.getSettings().getGroups("index.analysis.tokenizer");
final Map<String, Settings> analyzersSettings = indexSettings.getSettings().getGroups("index.analysis.analyzer");
final Map<String, CharFilterFactory> charFilterFactories = buildMapping(false, "charfilter", indexSettings, charFiltersSettings, charFilters, prebuiltAnalysis.charFilterFactories);
final Map<String, TokenizerFactory> tokenizerFactories = buildMapping(false, "tokenizer", indexSettings, tokenizersSettings, tokenizers, prebuiltAnalysis.tokenizerFactories);
Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters = new HashMap<>(this.tokenFilters);
/*
* synonym is different from everything else since it needs access to the tokenizer factories for this index.
* Instead of building plugin infrastructure for this single case, we make it a real exception so we neither pollute
* the general interface nor expose internal data structures more than necessary.
*/
tokenFilters.put("synonym", requriesAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, tokenizerFactories, name, settings)));
final Map<String, TokenFilterFactory> tokenFilterFactories = buildMapping(false, "tokenfilter", indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.tokenFilterFactories);
final Map<String, AnalyzerProvider> analyzierFactories = buildMapping(true, "analyzer", indexSettings, analyzersSettings, analyzers, prebuiltAnalysis.analyzerProviderFactories);
return new AnalysisService(indexSettings, analyzierFactories, tokenizerFactories, charFilterFactories, tokenFilterFactories);
}
private static <T> AnalysisModule.AnalysisProvider<T> requriesAnalysisSettings(AnalysisModule.AnalysisProvider<T> provider) {
return new AnalysisModule.AnalysisProvider<T>() {
@Override
public T get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
return provider.get(indexSettings, environment, name, settings);
}
@Override
public boolean requiresAnalysisSettings() {
return true;
}
};
}
private void registerBuiltInCharFilter(Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilters) {
charFilters.put("html_strip", HtmlStripCharFilterFactory::new);
charFilters.put("pattern_replace", requriesAnalysisSettings(PatternReplaceCharFilterFactory::new));
charFilters.put("mapping", requriesAnalysisSettings(MappingCharFilterFactory::new));
}
private void registerBuiltInTokenizer(Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizers) {
tokenizers.put("standard", StandardTokenizerFactory::new);
tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new);
tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new);
tokenizers.put("keyword", KeywordTokenizerFactory::new);
tokenizers.put("letter", LetterTokenizerFactory::new);
tokenizers.put("lowercase", LowerCaseTokenizerFactory::new);
tokenizers.put("whitespace", WhitespaceTokenizerFactory::new);
tokenizers.put("nGram", NGramTokenizerFactory::new);
tokenizers.put("ngram", NGramTokenizerFactory::new);
tokenizers.put("edgeNGram", EdgeNGramTokenizerFactory::new);
tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new);
tokenizers.put("pattern", PatternTokenizerFactory::new);
tokenizers.put("classic", ClassicTokenizerFactory::new);
tokenizers.put("thai", ThaiTokenizerFactory::new);
}
private void registerBuiltInTokenFilters(Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilters) {
tokenFilters.put("stop", StopTokenFilterFactory::new);
tokenFilters.put("reverse", ReverseTokenFilterFactory::new);
tokenFilters.put("asciifolding", ASCIIFoldingTokenFilterFactory::new);
tokenFilters.put("length", LengthTokenFilterFactory::new);
tokenFilters.put("lowercase", LowerCaseTokenFilterFactory::new);
tokenFilters.put("uppercase", UpperCaseTokenFilterFactory::new);
tokenFilters.put("porter_stem", PorterStemTokenFilterFactory::new);
tokenFilters.put("kstem", KStemTokenFilterFactory::new);
tokenFilters.put("standard", StandardTokenFilterFactory::new);
tokenFilters.put("nGram", NGramTokenFilterFactory::new);
tokenFilters.put("ngram", NGramTokenFilterFactory::new);
tokenFilters.put("edgeNGram", EdgeNGramTokenFilterFactory::new);
tokenFilters.put("edge_ngram", EdgeNGramTokenFilterFactory::new);
tokenFilters.put("shingle", ShingleTokenFilterFactory::new);
tokenFilters.put("unique", UniqueTokenFilterFactory::new);
tokenFilters.put("truncate", requriesAnalysisSettings(TruncateTokenFilterFactory::new));
tokenFilters.put("trim", TrimTokenFilterFactory::new);
tokenFilters.put("limit", LimitTokenCountFilterFactory::new);
tokenFilters.put("common_grams", requriesAnalysisSettings(CommonGramsTokenFilterFactory::new));
tokenFilters.put("snowball", SnowballTokenFilterFactory::new);
tokenFilters.put("stemmer", StemmerTokenFilterFactory::new);
tokenFilters.put("word_delimiter", WordDelimiterTokenFilterFactory::new);
tokenFilters.put("delimited_payload_filter", DelimitedPayloadTokenFilterFactory::new);
tokenFilters.put("elision", ElisionTokenFilterFactory::new);
tokenFilters.put("keep", requriesAnalysisSettings(KeepWordFilterFactory::new));
tokenFilters.put("keep_types", requriesAnalysisSettings(KeepTypesFilterFactory::new));
tokenFilters.put("pattern_capture", requriesAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new));
tokenFilters.put("pattern_replace", requriesAnalysisSettings(PatternReplaceTokenFilterFactory::new));
tokenFilters.put("dictionary_decompounder", requriesAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new));
tokenFilters.put("hyphenation_decompounder", requriesAnalysisSettings(HyphenationCompoundWordTokenFilterFactory::new));
tokenFilters.put("arabic_stem", ArabicStemTokenFilterFactory::new);
tokenFilters.put("brazilian_stem", BrazilianStemTokenFilterFactory::new);
tokenFilters.put("czech_stem", CzechStemTokenFilterFactory::new);
tokenFilters.put("dutch_stem", DutchStemTokenFilterFactory::new);
tokenFilters.put("french_stem", FrenchStemTokenFilterFactory::new);
tokenFilters.put("german_stem", GermanStemTokenFilterFactory::new);
tokenFilters.put("russian_stem", RussianStemTokenFilterFactory::new);
tokenFilters.put("keyword_marker", requriesAnalysisSettings(KeywordMarkerTokenFilterFactory::new));
tokenFilters.put("stemmer_override", requriesAnalysisSettings(StemmerOverrideTokenFilterFactory::new));
tokenFilters.put("arabic_normalization", ArabicNormalizationFilterFactory::new);
tokenFilters.put("german_normalization", GermanNormalizationFilterFactory::new);
tokenFilters.put("hindi_normalization", HindiNormalizationFilterFactory::new);
tokenFilters.put("indic_normalization", IndicNormalizationFilterFactory::new);
tokenFilters.put("sorani_normalization", SoraniNormalizationFilterFactory::new);
tokenFilters.put("persian_normalization", PersianNormalizationFilterFactory::new);
tokenFilters.put("scandinavian_normalization", ScandinavianNormalizationFilterFactory::new);
tokenFilters.put("scandinavian_folding", ScandinavianFoldingFilterFactory::new);
tokenFilters.put("serbian_normalization", SerbianNormalizationFilterFactory::new);
if (hunspellService != null) {
tokenFilters.put("hunspell", requriesAnalysisSettings((indexSettings, env, name, settings) -> new HunspellTokenFilterFactory(indexSettings, name, settings, hunspellService)));
}
tokenFilters.put("cjk_bigram", CJKBigramFilterFactory::new);
tokenFilters.put("cjk_width", CJKWidthFilterFactory::new);
tokenFilters.put("apostrophe", ApostropheFilterFactory::new);
tokenFilters.put("classic", ClassicFilterFactory::new);
tokenFilters.put("decimal_digit", DecimalDigitFilterFactory::new);
}
private void registerBuiltInAnalyzer(Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzers) {
analyzers.put("default", StandardAnalyzerProvider::new);
analyzers.put("standard", StandardAnalyzerProvider::new);
analyzers.put("standard_html_strip", StandardHtmlStripAnalyzerProvider::new);
analyzers.put("simple", SimpleAnalyzerProvider::new);
analyzers.put("stop", StopAnalyzerProvider::new);
analyzers.put("whitespace", WhitespaceAnalyzerProvider::new);
analyzers.put("keyword", KeywordAnalyzerProvider::new);
analyzers.put("pattern", PatternAnalyzerProvider::new);
analyzers.put("snowball", SnowballAnalyzerProvider::new);
analyzers.put("arabic", ArabicAnalyzerProvider::new);
analyzers.put("armenian", ArmenianAnalyzerProvider::new);
analyzers.put("basque", BasqueAnalyzerProvider::new);
analyzers.put("brazilian", BrazilianAnalyzerProvider::new);
analyzers.put("bulgarian", BulgarianAnalyzerProvider::new);
analyzers.put("catalan", CatalanAnalyzerProvider::new);
analyzers.put("chinese", ChineseAnalyzerProvider::new);
analyzers.put("cjk", CjkAnalyzerProvider::new);
analyzers.put("czech", CzechAnalyzerProvider::new);
analyzers.put("danish", DanishAnalyzerProvider::new);
analyzers.put("dutch", DutchAnalyzerProvider::new);
analyzers.put("english", EnglishAnalyzerProvider::new);
analyzers.put("finnish", FinnishAnalyzerProvider::new);
analyzers.put("french", FrenchAnalyzerProvider::new);
analyzers.put("galician", GalicianAnalyzerProvider::new);
analyzers.put("german", GermanAnalyzerProvider::new);
analyzers.put("greek", GreekAnalyzerProvider::new);
analyzers.put("hindi", HindiAnalyzerProvider::new);
analyzers.put("hungarian", HungarianAnalyzerProvider::new);
analyzers.put("indonesian", IndonesianAnalyzerProvider::new);
analyzers.put("irish", IrishAnalyzerProvider::new);
analyzers.put("italian", ItalianAnalyzerProvider::new);
analyzers.put("latvian", LatvianAnalyzerProvider::new);
analyzers.put("lithuanian", LithuanianAnalyzerProvider::new);
analyzers.put("norwegian", NorwegianAnalyzerProvider::new);
analyzers.put("persian", PersianAnalyzerProvider::new);
analyzers.put("portuguese", PortugueseAnalyzerProvider::new);
analyzers.put("romanian", RomanianAnalyzerProvider::new);
analyzers.put("russian", RussianAnalyzerProvider::new);
analyzers.put("sorani", SoraniAnalyzerProvider::new);
analyzers.put("spanish", SpanishAnalyzerProvider::new);
analyzers.put("swedish", SwedishAnalyzerProvider::new);
analyzers.put("turkish", TurkishAnalyzerProvider::new);
analyzers.put("thai", ThaiAnalyzerProvider::new);
}
private <T> Map<String, T> buildMapping(boolean analyzer, String toBuild, IndexSettings settings, Map<String, Settings> settingsMap, Map<String, AnalysisModule.AnalysisProvider<T>> providerMap, Map<String, AnalysisModule.AnalysisProvider<T>> defaultInstance) throws IOException {
Settings defaultSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, settings.getIndexVersionCreated()).build();
Map<String, T> factories = new HashMap<>();
for (Map.Entry<String, Settings> entry : settingsMap.entrySet()) {
String name = entry.getKey();
Settings currentSettings = entry.getValue();
String typeName = currentSettings.get("type");
if (analyzer) {
T factory;
if (typeName == null) {
if (currentSettings.get("tokenizer") != null) {
factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings);
} else {
throw new IllegalArgumentException(toBuild + " [" + name + "] must have a type associated with it");
}
} else if (typeName.equals("custom")) {
factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings);
} else {
AnalysisModule.AnalysisProvider<T> type = providerMap.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]");
}
factory = type.get(settings, environemnt, name, currentSettings);
}
factories.put(name, factory);
} else {
if (typeName == null) {
throw new IllegalArgumentException(toBuild + " [" + name + "] must have a type associated with it");
}
AnalysisModule.AnalysisProvider<T> type = providerMap.get(typeName);
if (type == null) {
throw new IllegalArgumentException("Unknown " + toBuild + " type [" + typeName + "] for [" + name + "]");
}
final T factory = type.get(settings, environemnt, name, currentSettings);
factories.put(name, factory);
}
}
// go over the remaining providers and register the ones that are not configured explicitly
for (Map.Entry<String, AnalysisModule.AnalysisProvider<T>> entry : providerMap.entrySet()) {
String name = entry.getKey();
AnalysisModule.AnalysisProvider<T> provider = entry.getValue();
// we don't want to re-register one that already exists
if (settingsMap.containsKey(name)) {
continue;
}
// if it requires settings, don't register it; we know the default has no settings
if (provider.requiresAnalysisSettings()) {
continue;
}
AnalysisModule.AnalysisProvider<T> defaultProvider = defaultInstance.get(name);
final T instance;
if (defaultProvider == null) {
instance = provider.get(settings, environemnt, name, defaultSettings);
} else {
instance = defaultProvider.get(settings, environemnt, name, defaultSettings);
}
factories.put(name, instance);
String camelCase = Strings.toCamelCase(name);
if (providerMap.containsKey(camelCase) == false && factories.containsKey(camelCase) == false) {
factories.put(camelCase, instance);
}
}
for (Map.Entry<String, AnalysisModule.AnalysisProvider<T>> entry : defaultInstance.entrySet()) {
final String name = entry.getKey();
final AnalysisModule.AnalysisProvider<T> provider = entry.getValue();
final String camelCase = Strings.toCamelCase(name);
if (factories.containsKey(name) == false || (defaultInstance.containsKey(camelCase) == false && factories.containsKey(camelCase) == false)) {
final T instance = provider.get(settings, environemnt, name, defaultSettings);
if (factories.containsKey(name) == false) {
factories.put(name, instance);
}
if ((defaultInstance.containsKey(camelCase) == false && factories.containsKey(camelCase) == false)) {
factories.put(camelCase, instance);
}
}
}
return factories;
}
private static class PrebuiltAnalysis implements Closeable {
final Map<String, AnalysisModule.AnalysisProvider<AnalyzerProvider>> analyzerProviderFactories;
final Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> tokenizerFactories;
final Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> tokenFilterFactories;
final Map<String, AnalysisModule.AnalysisProvider<CharFilterFactory>> charFilterFactories;
private PrebuiltAnalysis() {
Map<String, PreBuiltAnalyzerProviderFactory> analyzerProviderFactories = new HashMap<>();
Map<String, PreBuiltTokenizerFactoryFactory> tokenizerFactories = new HashMap<>();
Map<String, PreBuiltTokenFilterFactoryFactory> tokenFilterFactories = new HashMap<>();
Map<String, PreBuiltCharFilterFactoryFactory> charFilterFactories = new HashMap<>();
// Analyzers
for (PreBuiltAnalyzers preBuiltAnalyzerEnum : PreBuiltAnalyzers.values()) {
String name = preBuiltAnalyzerEnum.name().toLowerCase(Locale.ROOT);
analyzerProviderFactories.put(name, new PreBuiltAnalyzerProviderFactory(name, AnalyzerScope.INDICES, preBuiltAnalyzerEnum.getAnalyzer(Version.CURRENT)));
}
// Tokenizers
for (PreBuiltTokenizers preBuiltTokenizer : PreBuiltTokenizers.values()) {
String name = preBuiltTokenizer.name().toLowerCase(Locale.ROOT);
tokenizerFactories.put(name, new PreBuiltTokenizerFactoryFactory(preBuiltTokenizer.getTokenizerFactory(Version.CURRENT)));
}
// Tokenizer aliases
tokenizerFactories.put("nGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.NGRAM.getTokenizerFactory(Version.CURRENT)));
tokenizerFactories.put("edgeNGram", new PreBuiltTokenizerFactoryFactory(PreBuiltTokenizers.EDGE_NGRAM.getTokenizerFactory(Version.CURRENT)));
// Token filters
for (PreBuiltTokenFilters preBuiltTokenFilter : PreBuiltTokenFilters.values()) {
String name = preBuiltTokenFilter.name().toLowerCase(Locale.ROOT);
tokenFilterFactories.put(name, new PreBuiltTokenFilterFactoryFactory(preBuiltTokenFilter.getTokenFilterFactory(Version.CURRENT)));
}
// Token filter aliases
tokenFilterFactories.put("nGram", new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.NGRAM.getTokenFilterFactory(Version.CURRENT)));
tokenFilterFactories.put("edgeNGram", new PreBuiltTokenFilterFactoryFactory(PreBuiltTokenFilters.EDGE_NGRAM.getTokenFilterFactory(Version.CURRENT)));
// Char Filters
for (PreBuiltCharFilters preBuiltCharFilter : PreBuiltCharFilters.values()) {
String name = preBuiltCharFilter.name().toLowerCase(Locale.ROOT);
charFilterFactories.put(name, new PreBuiltCharFilterFactoryFactory(preBuiltCharFilter.getCharFilterFactory(Version.CURRENT)));
}
// Char filter aliases
charFilterFactories.put("htmlStrip", new PreBuiltCharFilterFactoryFactory(PreBuiltCharFilters.HTML_STRIP.getCharFilterFactory(Version.CURRENT)));
this.analyzerProviderFactories = Collections.unmodifiableMap(analyzerProviderFactories);
this.charFilterFactories = Collections.unmodifiableMap(charFilterFactories);
this.tokenFilterFactories = Collections.unmodifiableMap(tokenFilterFactories);
this.tokenizerFactories = Collections.unmodifiableMap(tokenizerFactories);
}
public AnalysisModule.AnalysisProvider<CharFilterFactory> getCharFilterFactory(String name) {
return charFilterFactories.get(name);
}
public AnalysisModule.AnalysisProvider<TokenFilterFactory> getTokenFilterFactory(String name) {
return tokenFilterFactories.get(name);
}
public AnalysisModule.AnalysisProvider<TokenizerFactory> getTokenizerFactory(String name) {
return tokenizerFactories.get(name);
}
public AnalysisModule.AnalysisProvider<AnalyzerProvider> getAnalyzerProvider(String name) {
return analyzerProviderFactories.get(name);
}
Analyzer analyzer(String name) {
PreBuiltAnalyzerProviderFactory analyzerProviderFactory = (PreBuiltAnalyzerProviderFactory) analyzerProviderFactories.get(name);
if (analyzerProviderFactory == null) {
return null;
}
return analyzerProviderFactory.analyzer();
}
@Override
public void close() throws IOException {
IOUtils.close(analyzerProviderFactories.values().stream().map((a) -> ((PreBuiltAnalyzerProviderFactory)a).analyzer()).collect(Collectors.toList()));
}
}
}
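A hedged usage sketch of the new entry point; environment and indexSettings are assumed to be in scope, and passing null for the HunspellService simply skips registering the "hunspell" filter:

AnalysisRegistry registry = new AnalysisRegistry(null, environment);
try (AnalysisService analysisService = registry.build(indexSettings)) {   // build() may throw IOException
    NamedAnalyzer standard = analysisService.analyzer("standard");
}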


@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
@ -29,9 +28,9 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
import org.elasticsearch.indices.analysis.IndicesAnalysisService;
import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@ -51,160 +50,20 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
private final NamedAnalyzer defaultSearchAnalyzer;
private final NamedAnalyzer defaultSearchQuoteAnalyzer;
public AnalysisService(IndexSettings indexSettings) {
this(indexSettings, null, null, null, null, null);
}
@Inject
public AnalysisService(IndexSettings indexSettings, @Nullable IndicesAnalysisService indicesAnalysisService,
@Nullable Map<String, AnalyzerProviderFactory> analyzerFactoryFactories,
@Nullable Map<String, TokenizerFactoryFactory> tokenizerFactoryFactories,
@Nullable Map<String, CharFilterFactoryFactory> charFilterFactoryFactories,
@Nullable Map<String, TokenFilterFactoryFactory> tokenFilterFactoryFactories) {
public AnalysisService(IndexSettings indexSettings,
Map<String, AnalyzerProvider> analyzerProviders,
Map<String, TokenizerFactory> tokenizerFactoryFactories,
Map<String, CharFilterFactory> charFilterFactoryFactories,
Map<String, TokenFilterFactory> tokenFilterFactoryFactories) {
super(indexSettings);
Settings defaultSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, indexSettings.getIndexVersionCreated()).build();
Map<String, TokenizerFactory> tokenizers = new HashMap<>();
if (tokenizerFactoryFactories != null) {
Map<String, Settings> tokenizersSettings = this.indexSettings.getSettings().getGroups("index.analysis.tokenizer");
for (Map.Entry<String, TokenizerFactoryFactory> entry : tokenizerFactoryFactories.entrySet()) {
String tokenizerName = entry.getKey();
TokenizerFactoryFactory tokenizerFactoryFactory = entry.getValue();
Settings tokenizerSettings = tokenizersSettings.get(tokenizerName);
if (tokenizerSettings == null) {
tokenizerSettings = defaultSettings;
}
TokenizerFactory tokenizerFactory = tokenizerFactoryFactory.create(tokenizerName, tokenizerSettings);
tokenizers.put(tokenizerName, tokenizerFactory);
tokenizers.put(Strings.toCamelCase(tokenizerName), tokenizerFactory);
}
}
if (indicesAnalysisService != null) {
for (Map.Entry<String, PreBuiltTokenizerFactoryFactory> entry : indicesAnalysisService.tokenizerFactories().entrySet()) {
String name = entry.getKey();
if (!tokenizers.containsKey(name)) {
tokenizers.put(name, entry.getValue().create(name, defaultSettings));
}
name = Strings.toCamelCase(entry.getKey());
if (!name.equals(entry.getKey())) {
if (!tokenizers.containsKey(name)) {
tokenizers.put(name, entry.getValue().create(name, defaultSettings));
}
}
}
}
this.tokenizers = unmodifiableMap(tokenizers);
Map<String, CharFilterFactory> charFilters = new HashMap<>();
if (charFilterFactoryFactories != null) {
Map<String, Settings> charFiltersSettings = this.indexSettings.getSettings().getGroups("index.analysis.char_filter");
for (Map.Entry<String, CharFilterFactoryFactory> entry : charFilterFactoryFactories.entrySet()) {
String charFilterName = entry.getKey();
CharFilterFactoryFactory charFilterFactoryFactory = entry.getValue();
Settings charFilterSettings = charFiltersSettings.get(charFilterName);
if (charFilterSettings == null) {
charFilterSettings = defaultSettings;
}
CharFilterFactory tokenFilterFactory = charFilterFactoryFactory.create(charFilterName, charFilterSettings);
charFilters.put(charFilterName, tokenFilterFactory);
charFilters.put(Strings.toCamelCase(charFilterName), tokenFilterFactory);
}
}
if (indicesAnalysisService != null) {
for (Map.Entry<String, PreBuiltCharFilterFactoryFactory> entry : indicesAnalysisService.charFilterFactories().entrySet()) {
String name = entry.getKey();
if (!charFilters.containsKey(name)) {
charFilters.put(name, entry.getValue().create(name, defaultSettings));
}
name = Strings.toCamelCase(entry.getKey());
if (!name.equals(entry.getKey())) {
if (!charFilters.containsKey(name)) {
charFilters.put(name, entry.getValue().create(name, defaultSettings));
}
}
}
}
this.charFilters = unmodifiableMap(charFilters);
Map<String, TokenFilterFactory> tokenFilters = new HashMap<>();
if (tokenFilterFactoryFactories != null) {
Map<String, Settings> tokenFiltersSettings = this.indexSettings.getSettings().getGroups("index.analysis.filter");
for (Map.Entry<String, TokenFilterFactoryFactory> entry : tokenFilterFactoryFactories.entrySet()) {
String tokenFilterName = entry.getKey();
TokenFilterFactoryFactory tokenFilterFactoryFactory = entry.getValue();
Settings tokenFilterSettings = tokenFiltersSettings.get(tokenFilterName);
if (tokenFilterSettings == null) {
tokenFilterSettings = defaultSettings;
}
TokenFilterFactory tokenFilterFactory = tokenFilterFactoryFactory.create(tokenFilterName, tokenFilterSettings);
tokenFilters.put(tokenFilterName, tokenFilterFactory);
tokenFilters.put(Strings.toCamelCase(tokenFilterName), tokenFilterFactory);
}
}
// pre initialize the globally registered ones into the map
if (indicesAnalysisService != null) {
for (Map.Entry<String, PreBuiltTokenFilterFactoryFactory> entry : indicesAnalysisService.tokenFilterFactories().entrySet()) {
String name = entry.getKey();
if (!tokenFilters.containsKey(name)) {
tokenFilters.put(name, entry.getValue().create(name, defaultSettings));
}
name = Strings.toCamelCase(entry.getKey());
if (!name.equals(entry.getKey())) {
if (!tokenFilters.containsKey(name)) {
tokenFilters.put(name, entry.getValue().create(name, defaultSettings));
}
}
}
}
this.tokenFilters = unmodifiableMap(tokenFilters);
Map<String, AnalyzerProvider> analyzerProviders = new HashMap<>();
if (analyzerFactoryFactories != null) {
Map<String, Settings> analyzersSettings = this.indexSettings.getSettings().getGroups("index.analysis.analyzer");
for (Map.Entry<String, AnalyzerProviderFactory> entry : analyzerFactoryFactories.entrySet()) {
String analyzerName = entry.getKey();
AnalyzerProviderFactory analyzerFactoryFactory = entry.getValue();
Settings analyzerSettings = analyzersSettings.get(analyzerName);
if (analyzerSettings == null) {
analyzerSettings = defaultSettings;
}
AnalyzerProvider analyzerFactory = analyzerFactoryFactory.create(analyzerName, analyzerSettings);
analyzerProviders.put(analyzerName, analyzerFactory);
}
}
if (indicesAnalysisService != null) {
for (Map.Entry<String, PreBuiltAnalyzerProviderFactory> entry : indicesAnalysisService.analyzerProviderFactories().entrySet()) {
String name = entry.getKey();
Version indexVersion = indexSettings.getIndexVersionCreated();
if (!analyzerProviders.containsKey(name)) {
analyzerProviders.put(name, entry.getValue().create(name, Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, indexVersion).build()));
}
String camelCaseName = Strings.toCamelCase(name);
if (!camelCaseName.equals(entry.getKey()) && !analyzerProviders.containsKey(camelCaseName)) {
analyzerProviders.put(camelCaseName, entry.getValue().create(name, Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, indexVersion).build()));
}
}
}
this.tokenizers = unmodifiableMap(tokenizerFactoryFactories);
this.charFilters = unmodifiableMap(charFilterFactoryFactories);
this.tokenFilters = unmodifiableMap(tokenFilterFactoryFactories);
analyzerProviders = new HashMap<>(analyzerProviders);
if (!analyzerProviders.containsKey("default")) {
analyzerProviders.put("default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS));
}
if (!analyzerProviders.containsKey("default_index")) {
analyzerProviders.put("default_index", analyzerProviders.get("default"));
}
if (!analyzerProviders.containsKey("default_search")) {
analyzerProviders.put("default_search", analyzerProviders.get("default"));
}
@ -213,7 +72,9 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
}
Map<String, NamedAnalyzer> analyzers = new HashMap<>();
for (AnalyzerProvider analyzerFactory : analyzerProviders.values()) {
for (Map.Entry<String, AnalyzerProvider> entry : analyzerProviders.entrySet()) {
AnalyzerProvider analyzerFactory = entry.getValue();
String name = entry.getKey();
/*
* Lucene defaults positionIncrementGap to 0 in all analyzers but
* Elasticsearch defaults them to 0 only before version 2.0
@ -245,10 +106,12 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
analyzer = new NamedAnalyzer(analyzer, overridePositionIncrementGap);
}
} else {
analyzer = new NamedAnalyzer(analyzerFactory.name(), analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
analyzer = new NamedAnalyzer(name, analyzerFactory.scope(), analyzerF, overridePositionIncrementGap);
}
analyzers.put(analyzerFactory.name(), analyzer);
analyzers.put(Strings.toCamelCase(analyzerFactory.name()), analyzer);
if (analyzers.containsKey(name)) {
throw new IllegalStateException("already registered analyzer with name: " + name);
}
analyzers.put(name, analyzer);
String strAliases = this.indexSettings.getSettings().get("index.analysis.analyzer." + analyzerFactory.name() + ".alias");
if (strAliases != null) {
for (String alias : Strings.commaDelimitedListToStringArray(strAliases)) {
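
Note: the replacement constructor takes finished factory maps instead of merging pre-built components in with camelCase aliasing, and the registration loop now fails fast on name collisions rather than silently adding a camelCase twin. A hedged sketch of that duplicate-name guard (Object stands in for NamedAnalyzer to keep the snippet self-contained):

    import java.util.HashMap;
    import java.util.Map;

    final class AnalyzerTableSketch {
        private final Map<String, Object> analyzers = new HashMap<>();

        void register(String name, Object analyzer) {
            // Exact names only, no implicit camelCase alias, and a hard failure
            // on duplicates instead of overwriting an existing entry.
            if (analyzers.containsKey(name)) {
                throw new IllegalStateException("already registered analyzer with name: " + name);
            }
            analyzers.put(name, analyzer);
        }
    }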

View File

@ -1,32 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import java.lang.annotation.*;
/**
* A marker annotation on {@link CharFilterFactory}, {@link AnalyzerProvider}, {@link TokenFilterFactory},
* or {@link TokenizerFactory} which will cause the provider/factory to only be created when explicit settings
* are provided.
*/
@Target({ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface AnalysisSettingsRequired {
}
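
Note: AnalysisSettingsRequired carried RUNTIME retention, so a consumer could only have tested for it reflectively to defer factory creation until explicit settings existed; with the uniform constructors introduced in this commit every factory receives settings anyway, leaving the marker nothing to gate. A sketch of the reflective check such a marker implies (the consumer shown is illustrative, not the removed Elasticsearch code):

    final class MarkerCheckSketch {
        // Pairs with the RUNTIME-retained annotation defined above; only that
        // retention level makes the annotation visible to reflection at all.
        static boolean requiresExplicitSettings(Class<?> factoryClass) {
            return factoryClass.isAnnotationPresent(AnalysisSettingsRequired.class);
        }
    }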

View File

@ -1,30 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.elasticsearch.common.settings.Settings;
/**
*
*/
public interface AnalyzerProviderFactory {
AnalyzerProvider create(String name, Settings settings);
}

View File

@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tr.ApostropheFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ApostropheFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ApostropheFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ApostropheFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}
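
Note: this ApostropheFilterFactory hunk is the template for the dozens of factory diffs that follow: @Inject and @Assisted come off the constructor, an Environment parameter goes on, and every factory ends up with the uniform (IndexSettings, Environment, String, Settings) shape that an AnalysisModule.AnalysisProvider (see the getters earlier in this diff) can invoke as a plain method reference. A self-contained sketch of that contract, with stand-in types where Elasticsearch classes would be:

    // Stand-ins for org.elasticsearch IndexSettings, Environment, Settings and
    // TokenFilterFactory; all names here are illustrative.
    final class IndexSettingsStub {}
    final class EnvironmentStub {}
    final class SettingsStub {}
    interface TokenFilterFactoryStub {}

    @FunctionalInterface
    interface AnalysisProviderSketch<T> {
        T get(IndexSettingsStub indexSettings, EnvironmentStub env, String name, SettingsStub settings);
    }

    final class ApostropheSketch implements TokenFilterFactoryStub {
        ApostropheSketch(IndexSettingsStub is, EnvironmentStub env, String name, SettingsStub s) {}
    }

    final class ProviderDemo {
        public static void main(String[] args) {
            // The uniform constructor is the whole contract: a method reference
            // replaces Guice assisted injection.
            AnalysisProviderSketch<TokenFilterFactoryStub> provider = ApostropheSketch::new;
            TokenFilterFactoryStub f =
                provider.get(new IndexSettingsStub(), new EnvironmentStub(), "apostrophe", new SettingsStub());
            System.out.println(f.getClass().getSimpleName());
        }
    }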

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ar.ArabicAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class ArabicAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arabic
private final ArabicAnalyzer arabicAnalyzer;
@Inject
public ArabicAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public ArabicAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
arabicAnalyzer = new ArabicAnalyzer(Analysis.parseStopWords(env, settings, ArabicAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ArabicNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ArabicNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ArabicNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ar.ArabicStemFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -31,8 +30,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ArabicStemTokenFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ArabicStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ArabicStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.hy.ArmenianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class ArmenianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arme
private final ArmenianAnalyzer analyzer;
@Inject
public ArmenianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public ArmenianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new ArmenianAnalyzer(Analysis.parseStopWords(env, settings, ArmenianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.eu.BasqueAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class BasqueAnalyzerProvider extends AbstractIndexAnalyzerProvider<Basque
private final BasqueAnalyzer analyzer;
@Inject
public BasqueAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public BasqueAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new BasqueAnalyzer(Analysis.parseStopWords(env, settings, BasqueAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class BrazilianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bra
private final BrazilianAnalyzer analyzer;
@Inject
public BrazilianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public BrazilianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new BrazilianAnalyzer(Analysis.parseStopWords(env, settings, BrazilianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -23,9 +23,8 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.br.BrazilianStemFilter;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
*
@ -34,8 +33,7 @@ public class BrazilianStemTokenFilterFactory extends AbstractTokenFilterFactory
private final CharArraySet exclusions;
@Inject
public BrazilianStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public BrazilianStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class BulgarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bul
private final BulgarianAnalyzer analyzer;
@Inject
public BulgarianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public BulgarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new BulgarianAnalyzer(Analysis.parseStopWords(env, settings, BulgarianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKBigramFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import java.util.Arrays;
@ -49,8 +48,7 @@ public final class CJKBigramFilterFactory extends AbstractTokenFilterFactory {
private final int flags;
private final boolean outputUnigrams;
@Inject
public CJKBigramFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public CJKBigramFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
outputUnigrams = settings.getAsBoolean("output_unigrams", false);
final String[] asArray = settings.getAsArray("ignored_scripts");

View File

@ -21,14 +21,13 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKWidthFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
public final class CJKWidthFilterFactory extends AbstractTokenFilterFactory {
@Inject
public CJKWidthFilterFactory(IndexSettings indexSettings, String name, Settings settings) {
public CJKWidthFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ca.CatalanAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class CatalanAnalyzerProvider extends AbstractIndexAnalyzerProvider<Catal
private final CatalanAnalyzer analyzer;
@Inject
public CatalanAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public CatalanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new CatalanAnalyzer(Analysis.parseStopWords(env, settings, CatalanAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -1,30 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.elasticsearch.common.settings.Settings;
/**
*
*/
public interface CharFilterFactoryFactory {
CharFilterFactory create(String name, Settings settings);
}

View File

@ -20,9 +20,8 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -32,8 +31,7 @@ public class ChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider<Stand
private final StandardAnalyzer analyzer;
@Inject
public ChineseAnalyzerProvider(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ChineseAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
// old index: best effort
analyzer = new StandardAnalyzer();

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class CjkAnalyzerProvider extends AbstractIndexAnalyzerProvider<CJKAnalyz
private final CJKAnalyzer analyzer;
@Inject
public CjkAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public CjkAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
CharArraySet stopWords = Analysis.parseStopWords(env, settings, CJKAnalyzer.getDefaultStopSet());

View File

@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.ClassicFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class ClassicFilterFactory extends AbstractTokenFilterFactory {
@Inject
public ClassicFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ClassicFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -22,9 +22,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.ClassicTokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -34,8 +33,7 @@ public class ClassicTokenizerFactory extends AbstractTokenizerFactory {
private final int maxTokenLength;
@Inject
public ClassicTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
}

View File

@ -23,8 +23,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
import org.apache.lucene.analysis.commongrams.CommonGramsQueryFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -32,7 +30,6 @@ import org.elasticsearch.index.IndexSettings;
/**
*
*/
@AnalysisSettingsRequired
public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet words;
@ -41,8 +38,7 @@ public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory {
private final boolean queryMode;
@Inject
public CommonGramsTokenFilterFactory(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public CommonGramsTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
this.ignoreCase = settings.getAsBoolean("ignore_case", false);
this.queryMode = settings.getAsBoolean("query_mode", false);

View File

@ -20,8 +20,6 @@
package org.elasticsearch.index.analysis;
import org.elasticsearch.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.core.StringFieldMapper;
@ -39,9 +37,8 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
private CustomAnalyzer customAnalyzer;
@Inject
public CustomAnalyzerProvider(IndexSettings indexSettings,
@Assisted String name, @Assisted Settings settings) {
String name, Settings settings) {
super(indexSettings, name, settings);
this.analyzerSettings = settings;
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.cz.CzechAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class CzechAnalyzerProvider extends AbstractIndexAnalyzerProvider<CzechAn
private final CzechAnalyzer analyzer;
@Inject
public CzechAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public CzechAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new CzechAnalyzer(Analysis.parseStopWords(env, settings, CzechAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -20,15 +20,13 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cz.CzechStemFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
public class CzechStemTokenFilterFactory extends AbstractTokenFilterFactory {
@Inject
public CzechStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public CzechStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.da.DanishAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class DanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Danish
private final DanishAnalyzer analyzer;
@Inject
public DanishAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public DanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new DanishAnalyzer(Analysis.parseStopWords(env, settings, DanishAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.DecimalDigitFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +30,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public final class DecimalDigitFilterFactory extends AbstractTokenFilterFactory {
@Inject
public DecimalDigitFilterFactory(IndexSettings indexSettings, String name, Settings settings) {
public DecimalDigitFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.payloads.*;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -41,9 +39,8 @@ public class DelimitedPayloadTokenFilterFactory extends AbstractTokenFilterFacto
char delimiter;
PayloadEncoder encoder;
@Inject
public DelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, @Assisted String name,
@Assisted Settings settings) {
public DelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, String name,
Settings settings) {
super(indexSettings, name, settings);
String delimiterConf = settings.get(DELIMITER);
if (delimiterConf != null) {

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.nl.DutchAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class DutchAnalyzerProvider extends AbstractIndexAnalyzerProvider<DutchAn
private final DutchAnalyzer analyzer;
@Inject
public DutchAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public DutchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new DutchAnalyzer(Analysis.parseStopWords(env, settings, DutchAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -23,9 +23,8 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.tartarus.snowball.ext.DutchStemmer;
@ -36,8 +35,7 @@ public class DutchStemTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet exclusions;
@Inject
public DutchStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public DutchStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
}

View File

@ -25,9 +25,8 @@ import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenFilter;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -46,8 +45,7 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
private org.elasticsearch.Version esVersion;
@Inject
public EdgeNGramTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
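
Note: the min_gram/max_gram settings above bound how many front-anchored grams the filter emits per token. A hedged sketch of that expansion (illustrative logic, not Lucene's EdgeNGramTokenFilter):

    import java.util.ArrayList;
    import java.util.List;

    final class EdgeNGramSketch {
        static List<String> edgeNGrams(String token, int minGram, int maxGram) {
            List<String> grams = new ArrayList<>();
            // Every gram is anchored at the front edge of the token.
            for (int len = minGram; len <= Math.min(maxGram, token.length()); len++) {
                grams.add(token.substring(0, len));
            }
            return grams;
        }

        public static void main(String[] args) {
            System.out.println(edgeNGrams("quick", 1, 3)); // [q, qu, qui]
        }
    }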

View File

@ -24,9 +24,8 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenChars;
@ -48,8 +47,7 @@ public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
protected org.elasticsearch.Version esVersion;
@Inject
public EdgeNGramTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);

View File

@ -22,8 +22,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.ElisionFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -35,8 +33,7 @@ public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet articles;
@Inject
public ElisionTokenFilterFactory(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public ElisionTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
this.articles = Analysis.parseArticles(env, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.en.EnglishAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class EnglishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Engli
private final EnglishAnalyzer analyzer;
@Inject
public EnglishAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public EnglishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new EnglishAnalyzer(Analysis.parseStopWords(env, settings, EnglishAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.fi.FinnishAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class FinnishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Finni
private final FinnishAnalyzer analyzer;
@Inject
public FinnishAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public FinnishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new FinnishAnalyzer(Analysis.parseStopWords(env, settings, FinnishAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class FrenchAnalyzerProvider extends AbstractIndexAnalyzerProvider<French
private final FrenchAnalyzer analyzer;
@Inject
public FrenchAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public FrenchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new FrenchAnalyzer(Analysis.parseStopWords(env, settings, FrenchAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -23,9 +23,8 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.tartarus.snowball.ext.FrenchStemmer;
@ -36,8 +35,7 @@ public class FrenchStemTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet exclusions;
@Inject
public FrenchStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public FrenchStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.gl.GalicianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class GalicianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Gali
private final GalicianAnalyzer analyzer;
@Inject
public GalicianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public GalicianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new GalicianAnalyzer(Analysis.parseStopWords(env, settings, GalicianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.de.GermanAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class GermanAnalyzerProvider extends AbstractIndexAnalyzerProvider<German
private final GermanAnalyzer analyzer;
@Inject
public GermanAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public GermanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new GermanAnalyzer(Analysis.parseStopWords(env, settings, GermanAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.de.GermanNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class GermanNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public GermanNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public GermanNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -23,9 +23,8 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.de.GermanStemFilter;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -35,8 +34,7 @@ public class GermanStemTokenFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet exclusions;
@Inject
public GermanStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public GermanStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
}

View File

@ -20,8 +20,6 @@
package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.el.GreekAnalyzer;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -33,8 +31,7 @@ public class GreekAnalyzerProvider extends AbstractIndexAnalyzerProvider<GreekAn
private final GreekAnalyzer analyzer;
@Inject
public GreekAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public GreekAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new GreekAnalyzer(Analysis.parseStopWords(env, settings, GreekAnalyzer.getDefaultStopSet()));
analyzer.setVersion(version);

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.hi.HindiAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class HindiAnalyzerProvider extends AbstractIndexAnalyzerProvider<HindiAn
private final HindiAnalyzer analyzer;
@Inject
public HindiAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public HindiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new HindiAnalyzer(Analysis.parseStopWords(env, settings, HindiAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.hi.HindiNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class HindiNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public HindiNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public HindiNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,8 +21,9 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.env.Environment;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import java.io.Reader;
@ -34,8 +35,7 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
public class HtmlStripCharFilterFactory extends AbstractCharFilterFactory {
private final Set<String> escapedTags;
@Inject
public HtmlStripCharFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public HtmlStripCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name);
String[] escapedTags = settings.getAsArray("escaped_tags");
if (escapedTags.length > 0) {

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.hu.HungarianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class HungarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Hun
private final HungarianAnalyzer analyzer;
@Inject
public HungarianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public HungarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new HungarianAnalyzer(Analysis.parseStopWords(env, settings, HungarianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,23 +21,19 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.hunspell.Dictionary;
import org.apache.lucene.analysis.hunspell.HunspellStemFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.analysis.HunspellService;
import java.util.Locale;
@AnalysisSettingsRequired
public class HunspellTokenFilterFactory extends AbstractTokenFilterFactory {
private final Dictionary dictionary;
private final boolean dedup;
private final boolean longestOnly;
@Inject
public HunspellTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings, HunspellService hunspellService) {
public HunspellTokenFilterFactory(IndexSettings indexSettings, String name, Settings settings, HunspellService hunspellService) {
super(indexSettings, name, settings);
String locale = settings.get("locale", settings.get("language", settings.get("lang", null)));
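
Note: the locale resolution above chains settings.get calls so that "locale" falls back to "language", then to "lang", then to null. A tiny map-backed sketch of that cascading-default idiom (SettingsSketch stands in for Elasticsearch's Settings):

    import java.util.Map;

    final class SettingsSketch {
        private final Map<String, String> values;
        SettingsSketch(Map<String, String> values) { this.values = values; }

        String get(String key, String defaultValue) {
            String v = values.get(key);
            return v != null ? v : defaultValue;
        }

        public static void main(String[] args) {
            SettingsSketch settings = new SettingsSketch(Map.of("language", "en_US"));
            // The innermost call supplies the last-resort default; each outer
            // key wins whenever it is actually present.
            String locale = settings.get("locale", settings.get("language", settings.get("lang", null)));
            System.out.println(locale); // -> en_US
        }
    }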

View File

@ -20,9 +20,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.in.IndicNormalizationFilter;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
/**
@ -30,8 +29,7 @@ import org.elasticsearch.index.IndexSettings;
*/
public class IndicNormalizationFilterFactory extends AbstractTokenFilterFactory {
@Inject
public IndicNormalizationFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
public IndicNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
}

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.id.IndonesianAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class IndonesianAnalyzerProvider extends AbstractIndexAnalyzerProvider<In
private final IndonesianAnalyzer analyzer;
@Inject
public IndonesianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public IndonesianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new IndonesianAnalyzer(Analysis.parseStopWords(env, settings, IndonesianAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.ga.IrishAnalyzer;
import org.apache.lucene.analysis.util.CharArraySet;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
@ -34,8 +32,7 @@ public class IrishAnalyzerProvider extends AbstractIndexAnalyzerProvider<IrishAn
private final IrishAnalyzer analyzer;
@Inject
public IrishAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
public IrishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings);
analyzer = new IrishAnalyzer(Analysis.parseStopWords(env, settings, IrishAnalyzer.getDefaultStopSet()),
Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.it.ItalianAnalyzer;
 import org.apache.lucene.analysis.util.CharArraySet;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.assistedinject.Assisted;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -34,8 +32,7 @@ public class ItalianAnalyzerProvider extends AbstractIndexAnalyzerProvider<ItalianAnalyzer> {
     private final ItalianAnalyzer analyzer;
-    @Inject
-    public ItalianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+    public ItalianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
         analyzer = new ItalianAnalyzer(Analysis.parseStopWords(env, settings, ItalianAnalyzer.getDefaultStopSet()),
                                        Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));


@@ -21,15 +21,13 @@ package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.en.KStemFilter;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.assistedinject.Assisted;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
 public class KStemTokenFilterFactory extends AbstractTokenFilterFactory {
-    @Inject
-    public KStemTokenFilterFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
+    public KStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
     }


@@ -21,10 +21,8 @@ package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.core.TypeTokenFilter;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.assistedinject.Assisted;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexSettings;
 import java.util.Arrays;
@@ -41,14 +39,12 @@ import java.util.Set;
  * <li>{@value #KEEP_TYPES_KEY} the array of words / tokens to keep.</li>
  * </ul>
  */
-@AnalysisSettingsRequired
 public class KeepTypesFilterFactory extends AbstractTokenFilterFactory {
     private final Set<String> keepTypes;
     private static final String KEEP_TYPES_KEY = "types";
-    @Inject
     public KeepTypesFilterFactory(IndexSettings indexSettings,
-                                  Environment env, @Assisted String name, @Assisted Settings settings) {
+                                  Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
         final String[] arrayKeepTypes = settings.getAsArray(KEEP_TYPES_KEY, null);
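
Per the javadoc above, keep_types keeps only tokens whose type attribute appears in the "types" array setting, which the constructor reads via settings.getAsArray(KEEP_TYPES_KEY, null). A hedged sketch of that parsing step in plain Java (class and method names and the error message are mine, not the commit's):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class KeepTypesSketch {
    // A null array means the required "types" setting was absent; otherwise
    // build the set handed to Lucene's TypeTokenFilter in keep (white-list) mode.
    static Set<String> parseKeepTypes(String[] arrayKeepTypes) {
        if (arrayKeepTypes == null) {
            throw new IllegalArgumentException("keep_types requires the `types` setting");
        }
        return new HashSet<>(Arrays.asList(arrayKeepTypes));
    }

    public static void main(String[] args) {
        // Token types such as "<NUM>" or "<ALPHANUM>" are produced by the tokenizer.
        System.out.println(parseKeepTypes(new String[] {"<NUM>"}));
    }
}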


@@ -24,10 +24,8 @@ import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
 import org.apache.lucene.analysis.miscellaneous.Lucene43KeepWordFilter;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.util.Version;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.assistedinject.Assisted;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexSettings;
 /**
@@ -51,7 +49,6 @@ import org.elasticsearch.index.IndexSettings;
  *
  * @see StopTokenFilterFactory
  */
-@AnalysisSettingsRequired
 public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
     private final CharArraySet keepWords;
     private final boolean enablePositionIncrements;
@@ -60,9 +57,8 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
     private static final String KEEP_WORDS_CASE_KEY = KEEP_WORDS_KEY + "_case"; // for javadoc
     private static final String ENABLE_POS_INC_KEY = "enable_position_increments";
-    @Inject
     public KeepWordFilterFactory(IndexSettings indexSettings,
-                                 Environment env, @Assisted String name, @Assisted Settings settings) {
+                                 Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
         final String[] arrayKeepWords = settings.getAsArray(KEEP_WORDS_KEY, null);
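
The @see StopTokenFilterFactory reference fits because keep-words is a stop filter inverted: it removes every token not on the list. A hedged sketch of that selection using the modern KeepWordFilter from the imports above (the Lucene43KeepWordFilter back-compat branch and the position-increment handling are elided; helper names are mine):

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
import org.apache.lucene.analysis.util.CharArraySet;

class KeepWordSketch {
    // ignoreCase = true corresponds to the keep_words_case-style option;
    // CharArraySet folds case on insert and lookup instead of copying tokens.
    static TokenStream keepOnly(TokenStream in, String[] words, boolean ignoreCase) {
        CharArraySet keepWords = new CharArraySet(words.length, ignoreCase);
        for (String word : words) {
            keepWords.add(word);
        }
        return new KeepWordFilter(in, keepWords);
    }
}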


@@ -20,9 +20,8 @@
 package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.assistedinject.Assisted;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
 /**
@@ -32,8 +31,7 @@ public class KeywordAnalyzerProvider extends AbstractIndexAnalyzerProvider<KeywordAnalyzer> {
     private final KeywordAnalyzer keywordAnalyzer;
-    @Inject
-    public KeywordAnalyzerProvider(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
+    public KeywordAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         this.keywordAnalyzer = new KeywordAnalyzer();
     }


@@ -22,21 +22,17 @@ package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
 import org.apache.lucene.analysis.util.CharArraySet;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.assistedinject.Assisted;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexSettings;
 import java.util.Set;
-@AnalysisSettingsRequired
 public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory {
     private final CharArraySet keywordLookup;
-    @Inject
-    public KeywordMarkerTokenFilterFactory(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+    public KeywordMarkerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
         boolean ignoreCase = settings.getAsBoolean("ignore_case", false);


@@ -21,9 +21,8 @@ package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.assistedinject.Assisted;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
 /**
@@ -33,8 +32,7 @@ public class KeywordTokenizerFactory extends AbstractTokenizerFactory {
     private final int bufferSize;
-    @Inject
-    public KeywordTokenizerFactory(IndexSettings indexSettings, @Assisted String name, @Assisted Settings settings) {
+    public KeywordTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         bufferSize = settings.getAsInt("buffer_size", 256);
     }
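
buffer_size (default 256, per the constructor above) is only the initial term buffer for Lucene's KeywordTokenizer, which emits the entire input as a single token and grows the buffer as it reads. A hedged sketch of how the factory's create() presumably uses the stored value (the factory's actual body is not shown in this diff):

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;

class KeywordTokenizerSketch {
    // Assumption: the stored bufferSize feeds KeywordTokenizer's int constructor.
    static Tokenizer create(int bufferSize) {
        return new KeywordTokenizer(bufferSize);
    }
}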


@@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.lv.LatvianAnalyzer;
 import org.apache.lucene.analysis.util.CharArraySet;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.assistedinject.Assisted;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -34,8 +32,7 @@ public class LatvianAnalyzerProvider extends AbstractIndexAnalyzerProvider<LatvianAnalyzer> {
     private final LatvianAnalyzer analyzer;
-    @Inject
-    public LatvianAnalyzerProvider(IndexSettings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) {
+    public LatvianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
         analyzer = new LatvianAnalyzer(Analysis.parseStopWords(env, settings, LatvianAnalyzer.getDefaultStopSet()),
                                        Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET));

Some files were not shown because too many files have changed in this diff.