Use preconfigured filters correctly in Analyze API (#43568)
When a named token filter or char filter is passed to the Analyze API in a request that targets no index, we currently try to build the filter with no index settings. This misses cases where a pre-configured filter is defined in the analysis registry. One example is the elision filter, which has a pre-configured version built with the French elision set; normal analysis uses this pre-configured set, but the Analyze API ends up with NPEs because it tries to instantiate the filter with no index settings.

This commit changes the Analyze API to check for pre-configured filters when the request has no index and refers to a filter by name rather than by a custom definition. It also changes the pre-configured `word_delimiter_graph` filter and `edge_ngram` tokenizer so that their settings are consistent with the defaults used when they are created with no settings.

Closes #43002
Closes #43621
Closes #43582
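For orientation, the failing case looks roughly like this when driven through the transport action used by the tests further down. This is a hedged sketch rather than code from the commit: it assumes an analysis registry built from CommonAnalysisPlugin (so the pre-configured "elision" filter is registered) and a maxTokenCount limit, mirroring the setup in TransportAnalyzeActionTests.

    // Analyze request with no index: just a tokenizer plus a filter referred to by name.
    AnalyzeAction.Request request = new AnalyzeAction.Request();
    request.text("l'avion");
    request.tokenizer("standard");
    request.addTokenFilter("elision"); // name only, no custom settings

    // Before this change the registry tried to build the filter factory with null index
    // settings and hit an NPE; with the fix it resolves the pre-configured "elision" filter.
    List<AnalyzeAction.AnalyzeToken> tokens =
        TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount).getTokens();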
parent 05a7333eca
commit 8ff5519b11
@@ -83,6 +83,7 @@ import org.apache.lucene.analysis.miscellaneous.TrimFilter;
 import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;
+import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
 import org.apache.lucene.analysis.ngram.NGramTokenFilter;
@@ -110,6 +111,7 @@ import org.apache.lucene.analysis.tr.ApostropheFilter;
 import org.apache.lucene.analysis.tr.TurkishAnalyzer;
 import org.apache.lucene.analysis.util.ElisionFilter;
 import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.Version;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -488,13 +490,15 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
                 | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE
                 | WordDelimiterFilter.SPLIT_ON_NUMERICS
                 | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null)));
-        filters.add(PreConfiguredTokenFilter.singleton("word_delimiter_graph", false, input ->
-            new WordDelimiterGraphFilter(input,
+        filters.add(PreConfiguredTokenFilter.singletonWithVersion("word_delimiter_graph", false, (input, version) -> {
+            boolean adjustOffsets = version.onOrAfter(Version.V_7_3_0);
+            return new WordDelimiterGraphFilter(input, adjustOffsets, WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE,
                 WordDelimiterGraphFilter.GENERATE_WORD_PARTS
                 | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS
                 | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE
                 | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS
-                | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, null)));
+                | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, null);
+        }));
         return filters;
     }

@@ -508,8 +512,12 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
         tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new));
         tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new));
         tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new));
-        tokenizers.add(PreConfiguredTokenizer.singleton("edge_ngram",
-            () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE)));
+        tokenizers.add(PreConfiguredTokenizer.elasticsearchVersion("edge_ngram", (version) -> {
+            if (version.onOrAfter(Version.V_7_3_0)) {
+                return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
+            }
+            return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
+        }));
         tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1)));
         tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new));
         // TODO deprecate and remove in API
@@ -518,8 +526,12 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri

         // Temporary shim for aliases. TODO deprecate after they are moved
         tokenizers.add(PreConfiguredTokenizer.singleton("nGram", NGramTokenizer::new));
-        tokenizers.add(PreConfiguredTokenizer.singleton("edgeNGram",
-            () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE)));
+        tokenizers.add(PreConfiguredTokenizer.elasticsearchVersion("edgeNGram", (version) -> {
+            if (version.onOrAfter(Version.V_7_3_0)) {
+                return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
+            }
+            return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
+        }));
         tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new));

         return tokenizers;
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.analysis.common;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
+import org.elasticsearch.indices.analysis.AnalysisModule;
+import org.elasticsearch.test.ESTokenStreamTestCase;
+import org.elasticsearch.test.IndexSettingsModule;
+import org.elasticsearch.test.VersionUtils;
+
+import java.io.IOException;
+import java.util.Collections;
+
+public class EdgeNGramTokenizerTests extends ESTokenStreamTestCase {
+
+    private IndexAnalyzers buildAnalyzers(Version version, String tokenizer) throws IOException {
+        Settings settings = Settings.builder()
+            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+            .build();
+        Settings indexSettings = Settings.builder()
+            .put(IndexMetaData.SETTING_VERSION_CREATED, version)
+            .put("index.analysis.analyzer.my_analyzer.tokenizer", tokenizer)
+            .build();
+        IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
+        return new AnalysisModule(TestEnvironment.newEnvironment(settings),
+            Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings);
+    }
+
+    public void testPreConfiguredTokenizer() throws IOException {
+
+        // Before 7.3 we return ngrams of length 1 only
+        {
+            Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0,
+                VersionUtils.getPreviousVersion(Version.V_7_3_0));
+            try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edge_ngram")) {
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "test", new String[]{"t"});
+            }
+        }
+
+        // Check deprecated name as well
+        {
+            Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0,
+                VersionUtils.getPreviousVersion(Version.V_7_3_0));
+            try (IndexAnalyzers indexAnalyzers = buildAnalyzers(version, "edgeNGram")) {
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "test", new String[]{"t"});
+            }
+        }
+
+        // Afterwards, we return ngrams of length 1 and 2, to match the default factory settings
+        {
+            try (IndexAnalyzers indexAnalyzers = buildAnalyzers(Version.CURRENT, "edge_ngram")) {
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "test", new String[]{"t", "te"});
+            }
+        }
+
+        // Check deprecated name as well
+        {
+            try (IndexAnalyzers indexAnalyzers = buildAnalyzers(Version.CURRENT, "edgeNGram")) {
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "test", new String[]{"t", "te"});
+
+            }
+        }
+
+    }
+}
@@ -20,14 +20,24 @@ package org.elasticsearch.analysis.common;

 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.AnalysisTestsHelper;
+import org.elasticsearch.index.analysis.IndexAnalyzers;
+import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.indices.analysis.AnalysisModule;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.IndexSettingsModule;
+import org.elasticsearch.test.VersionUtils;

 import java.io.IOException;
 import java.io.StringReader;
+import java.util.Collections;

 public class WordDelimiterGraphTokenFilterFactoryTests
     extends BaseWordDelimiterTokenFilterFactoryTestCase {
@@ -107,4 +117,51 @@ public class WordDelimiterGraphTokenFilterFactoryTests
         assertTokenStreamContents(tokenFilter.create(tokenizer), expected, expectedStartOffsets, expectedEndOffsets, null,
             expectedIncr, expectedPosLen, null);
     }
+
+    public void testPreconfiguredFilter() throws IOException {
+        // Before 7.3 we don't adjust offsets
+        {
+            Settings settings = Settings.builder()
+                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+                .build();
+            Settings indexSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_VERSION_CREATED,
+                    VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, VersionUtils.getPreviousVersion(Version.V_7_3_0)))
+                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
+                .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph")
+                .build();
+            IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
+
+            try (IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings),
+                Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings)) {
+
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "h100", new String[]{"h", "100"}, new int[]{ 0, 0 }, new int[]{ 4, 4 });
+
+            }
+        }
+
+        // After 7.3 we do adjust offsets
+        {
+            Settings settings = Settings.builder()
+                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
+                .build();
+            Settings indexSettings = Settings.builder()
+                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
+                .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph")
+                .build();
+            IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
+
+            try (IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings),
+                Collections.singletonList(new CommonAnalysisPlugin())).getAnalysisRegistry().build(idxSettings)) {
+
+                NamedAnalyzer analyzer = indexAnalyzers.get("my_analyzer");
+                assertNotNull(analyzer);
+                assertAnalyzesTo(analyzer, "h100", new String[]{"h", "100"}, new int[]{ 0, 1 }, new int[]{ 1, 4 });
+
+            }
+        }
+    }
 }
@@ -111,6 +111,7 @@ public final class AnalysisRegistry implements Closeable {
     private <T> T getComponentFactory(IndexSettings settings, NameOrDefinition nod,
                                       String componentType,
                                       Function<String, AnalysisProvider<T>> globalComponentProvider,
+                                      Function<String, AnalysisProvider<T>> prebuiltComponentProvider,
                                       BiFunction<String, IndexSettings, AnalysisProvider<T>> indexComponentProvider) throws IOException {
         if (nod.definition != null) {
             // custom component, so we build it from scratch
@@ -128,10 +129,14 @@ public final class AnalysisRegistry implements Closeable {
             return factory.get(settings, environment, "__anonymous__" + type, nod.definition);
         }
         if (settings == null) {
-            // no index provided, so we use global analysis components only
-            AnalysisProvider<T> factory = globalComponentProvider.apply(nod.name);
+            // no index provided, so we use prebuilt analysis components
+            AnalysisProvider<T> factory = prebuiltComponentProvider.apply(nod.name);
             if (factory == null) {
-                throw new IllegalArgumentException("failed to find global " + componentType + " under [" + nod.name + "]");
+                // if there's no prebuilt component, try loading a global one to build with no settings
+                factory = globalComponentProvider.apply(nod.name);
+                if (factory == null) {
+                    throw new IllegalArgumentException("failed to find global " + componentType + " under [" + nod.name + "]");
+                }
             }
             return factory.get(environment, nod.name);
         } else {
@@ -219,25 +224,26 @@ public final class AnalysisRegistry implements Closeable {
     public NamedAnalyzer buildCustomAnalyzer(IndexSettings indexSettings, boolean normalizer, NameOrDefinition tokenizer,
                                              List<NameOrDefinition> charFilters, List<NameOrDefinition> tokenFilters) throws IOException {
         TokenizerFactory tokenizerFactory
-            = getComponentFactory(indexSettings, tokenizer, "tokenizer", this::getTokenizerProvider, this::getTokenizerProvider);
+            = getComponentFactory(indexSettings, tokenizer, "tokenizer",
+                this::getTokenizerProvider, prebuiltAnalysis::getTokenizerFactory, this::getTokenizerProvider);

         List<CharFilterFactory> charFilterFactories = new ArrayList<>();
         for (NameOrDefinition nod : charFilters) {
             charFilterFactories.add(getComponentFactory(indexSettings, nod, "char_filter",
-                this::getCharFilterProvider, this::getCharFilterProvider));
+                this::getCharFilterProvider, prebuiltAnalysis::getCharFilterFactory, this::getCharFilterProvider));
         }

         List<TokenFilterFactory> tokenFilterFactories = new ArrayList<>();
         for (NameOrDefinition nod : tokenFilters) {
             TokenFilterFactory tff = getComponentFactory(indexSettings, nod, "filter",
-                this::getTokenFilterProvider, this::getTokenFilterProvider);
+                this::getTokenFilterProvider, prebuiltAnalysis::getTokenFilterFactory, this::getTokenFilterProvider);
             if (normalizer && tff instanceof NormalizingTokenFilterFactory == false) {
                 throw new IllegalArgumentException("Custom normalizer may not use filter [" + tff.name() + "]");
             }
             tff = tff.getChainAwareTokenFilterFactory(tokenizerFactory, charFilterFactories, tokenFilterFactories, name -> {
                 try {
                     return getComponentFactory(indexSettings, new NameOrDefinition(name), "filter",
-                        this::getTokenFilterProvider, this::getTokenFilterProvider);
+                        this::getTokenFilterProvider, prebuiltAnalysis::getTokenFilterFactory, this::getTokenFilterProvider);
                 } catch (IOException e) {
                     throw new UncheckedIOException(e);
                 }
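Taken together, the AnalysisRegistry hunks above change the lookup order for a component referred to by name when the request carries no index settings: pre-configured providers are consulted first, then a global provider built with no settings, and only then does the lookup fail. The following is a simplified, stand-alone sketch of that order; the class and method names here are illustrative, not the actual getComponentFactory code.

    import java.util.function.Function;

    import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;

    final class NoIndexComponentLookup {

        // Sketch of the fallback order when an Analyze request names a component and has no index:
        // pre-configured registry first, then a global provider that can be built without settings.
        static <T> AnalysisProvider<T> resolve(String name, String componentType,
                                               Function<String, AnalysisProvider<T>> prebuiltProvider,
                                               Function<String, AnalysisProvider<T>> globalProvider) {
            AnalysisProvider<T> factory = prebuiltProvider.apply(name);
            if (factory == null) {
                // no prebuilt component; fall back to a global one built with no settings
                factory = globalProvider.apply(name);
            }
            if (factory == null) {
                throw new IllegalArgumentException("failed to find global " + componentType + " under [" + name + "]");
            }
            return factory;
        }
    }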
@@ -142,7 +142,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {

             @Override
             public List<PreConfiguredCharFilter> getPreConfiguredCharFilters() {
-                return singletonList(PreConfiguredCharFilter.singleton("append_foo", false, reader -> new AppendCharFilter(reader, "foo")));
+                return singletonList(PreConfiguredCharFilter.singleton("append", false, reader -> new AppendCharFilter(reader, "foo")));
             }
         };
         registry = new AnalysisModule(environment, singletonList(plugin)).getAnalysisRegistry();
@@ -171,24 +171,11 @@ public class TransportAnalyzeActionTests extends ESTestCase {
         List<AnalyzeAction.AnalyzeToken> tokens = analyze.getTokens();
         assertEquals(4, tokens.size());

-        // Refer to a token filter by its type so we get its default configuration
-        request = new AnalyzeAction.Request();
-        request.text("the qu1ck brown fox");
-        request.tokenizer("standard");
-        request.addTokenFilter("mock");
-        analyze
-            = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount);
-        tokens = analyze.getTokens();
-        assertEquals(3, tokens.size());
-        assertEquals("qu1ck", tokens.get(0).getTerm());
-        assertEquals("brown", tokens.get(1).getTerm());
-        assertEquals("fox", tokens.get(2).getTerm());
-
         // We can refer to a pre-configured token filter by its name to get it
         request = new AnalyzeAction.Request();
         request.text("the qu1ck brown fox");
         request.tokenizer("standard");
-        request.addCharFilter("append_foo");
+        request.addCharFilter("append"); // <-- no config, so use preconfigured filter
         analyze
             = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount);
         tokens = analyze.getTokens();
@@ -198,31 +185,32 @@ public class TransportAnalyzeActionTests extends ESTestCase {
         assertEquals("brown", tokens.get(2).getTerm());
         assertEquals("foxfoo", tokens.get(3).getTerm());

-        // We can refer to a token filter by its type to get its default configuration
+        // If the preconfigured filter doesn't exist, we use a global filter with no settings
         request = new AnalyzeAction.Request();
         request.text("the qu1ck brown fox");
         request.tokenizer("standard");
-        request.addCharFilter("append");
-        request.text("the qu1ck brown fox");
+        request.addTokenFilter("mock"); // <-- not preconfigured, but a global one available
         analyze
             = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount);
         tokens = analyze.getTokens();
-        assertEquals(4, tokens.size());
-        assertEquals("the", tokens.get(0).getTerm());
-        assertEquals("qu1ck", tokens.get(1).getTerm());
-        assertEquals("brown", tokens.get(2).getTerm());
-        assertEquals("foxbar", tokens.get(3).getTerm());
+        assertEquals(3, tokens.size());
+        assertEquals("qu1ck", tokens.get(0).getTerm());
+        assertEquals("brown", tokens.get(1).getTerm());
+        assertEquals("fox", tokens.get(2).getTerm());

-        // We can pass a new configuration
-        request = new AnalyzeAction.Request();
-        request.text("the qu1ck brown fox");
-        request.tokenizer("standard");
         Map<String, Object> tokenFilterConfig = new HashMap<>();
         tokenFilterConfig.put("type", "mock");
         tokenFilterConfig.put("stopword", "brown");
-        request.addTokenFilter(tokenFilterConfig);
-        request.addCharFilter("append");
+
+        Map<String, Object> charFilterConfig = new HashMap<>();
+        charFilterConfig.put("type", "append");
+
+        // We can build a new char filter to get default values
+        request = new AnalyzeAction.Request();
         request.text("the qu1ck brown fox");
+        request.tokenizer("standard");
+        request.addTokenFilter(tokenFilterConfig);
+        request.addCharFilter(charFilterConfig); // <-- basic config, uses defaults
         analyze
             = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount);
         tokens = analyze.getTokens();
@@ -230,6 +218,21 @@ public class TransportAnalyzeActionTests extends ESTestCase {
         assertEquals("the", tokens.get(0).getTerm());
         assertEquals("qu1ck", tokens.get(1).getTerm());
         assertEquals("foxbar", tokens.get(2).getTerm());
+
+        // We can pass a new configuration
+        request = new AnalyzeAction.Request();
+        request.text("the qu1ck brown fox");
+        request.tokenizer("standard");
+        request.addTokenFilter(tokenFilterConfig);
+        charFilterConfig.put("suffix", "baz");
+        request.addCharFilter(charFilterConfig);
+        analyze
+            = TransportAnalyzeAction.analyze(request, registry, null, maxTokenCount);
+        tokens = analyze.getTokens();
+        assertEquals(3, tokens.size());
+        assertEquals("the", tokens.get(0).getTerm());
+        assertEquals("qu1ck", tokens.get(1).getTerm());
+        assertEquals("foxbaz", tokens.get(2).getTerm());
     }

     public void testFillsAttributes() throws IOException {