This commit adds support for rules with multiple tokens on the LHS, also known as "contraction rules", to the stemmer override token filter. Contraction rules are handy for translating multiple inflected words into the same root form. One side effect of this change is that it brings the stemmer override rules format closer to the synonym rules format, which makes it easier to translate one into the other. This change also makes the stemmer override rules parser stricter, so that it should catch more errors which were previously accepted. Closes #56113
This commit is contained in:
parent
4263c25b2f
commit
a7c36c8af5
|
@ -8,7 +8,7 @@ Overrides stemming algorithms, by applying a custom mapping, then
|
|||
protecting these terms from being modified by stemmers. Must be placed
|
||||
before any stemming filters.
|
||||
|
||||
Rules are mappings in the form of `token1[, ..., tokenN] => override`.
|
||||
|
||||
[cols="<,<",options="header",]
|
||||
|=======================================================================
|
||||
|
@ -69,7 +69,7 @@ PUT /my_index
|
|||
"custom_stems" : {
|
||||
"type" : "stemmer_override",
|
||||
"rules" : [
|
||||
"running => run",
|
||||
"running, runs => run",
|
||||
"stemmer => stemmer"
|
||||
]
|
||||
}
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
running => run
|
||||
running, runs => run
|
||||
|
||||
stemmer => stemmer
|
||||
|
|
|
@ -22,7 +22,6 @@ package org.elasticsearch.analysis.common;
|
|||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter;
|
||||
import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter.StemmerOverrideMap;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
|
@ -57,19 +56,23 @@ public class StemmerOverrideTokenFilterFactory extends AbstractTokenFilterFactor
|
|||
|
||||
static void parseRules(List<String> rules, StemmerOverrideFilter.Builder builder, String mappingSep) {
|
||||
for (String rule : rules) {
|
||||
String key, override;
|
||||
List<String> mapping = Strings.splitSmart(rule, mappingSep, false);
|
||||
if (mapping.size() == 2) {
|
||||
key = mapping.get(0).trim();
|
||||
override = mapping.get(1).trim();
|
||||
} else {
|
||||
String[] sides = rule.split(mappingSep, -1);
|
||||
if (sides.length != 2) {
|
||||
throw new RuntimeException("Invalid Keyword override Rule:" + rule);
|
||||
}
|
||||
|
||||
if (key.isEmpty() || override.isEmpty()) {
|
||||
String[] keys = sides[0].split(",", -1);
|
||||
String override = sides[1].trim();
|
||||
if (override.isEmpty() || override.indexOf(',') != -1) {
|
||||
throw new RuntimeException("Invalid Keyword override Rule:" + rule);
|
||||
} else {
|
||||
builder.add(key, override);
|
||||
}
|
||||
|
||||
for (String key : keys) {
|
||||
String trimmedKey = key.trim();
|
||||
if (trimmedKey.isEmpty()) {
|
||||
throw new RuntimeException("Invalid Keyword override Rule:" + rule);
|
||||
}
|
||||
builder.add(trimmedKey, override);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,80 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.analysis.AnalysisTestsHelper;
|
||||
import org.elasticsearch.index.analysis.TokenFilterFactory;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.ESTokenStreamTestCase;
|
||||
import org.junit.Rule;
|
||||
import org.junit.rules.ExpectedException;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.util.Arrays;
|
||||
import java.util.Locale;
|
||||
|
||||
public class StemmerOverrideTokenFilterFactoryTests extends ESTokenStreamTestCase {
|
||||
@Rule
|
||||
public ExpectedException expectedException = ExpectedException.none();
|
||||
|
||||
public static TokenFilterFactory create(String... rules) throws IOException {
|
||||
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
|
||||
Settings.builder()
|
||||
.put("index.analysis.filter.my_stemmer_override.type", "stemmer_override")
|
||||
.putList("index.analysis.filter.my_stemmer_override.rules", rules)
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
|
||||
.build(),
|
||||
new CommonAnalysisPlugin());
|
||||
|
||||
return analysis.tokenFilter.get("my_stemmer_override");
|
||||
}
|
||||
|
||||
public void testRuleError() {
|
||||
for (String rule : Arrays.asList(
|
||||
"", // empty
|
||||
"a", // no arrow
|
||||
"a=>b=>c", // multiple arrows
|
||||
"=>a=>b", // multiple arrows
|
||||
"a=>", // no override
|
||||
"a=>b,c", // multiple overrides
|
||||
"=>a", // no keys
|
||||
"a,=>b" // empty key
|
||||
)) {
|
||||
expectThrows(RuntimeException.class, String.format(
|
||||
Locale.ROOT, "Should fail for invalid rule: '%s'", rule
|
||||
), () -> create(rule));
|
||||
}
|
||||
}
|
||||
|
||||
public void testRulesOk() throws IOException {
|
||||
TokenFilterFactory tokenFilterFactory = create(
|
||||
"a => 1",
|
||||
"b,c => 2"
|
||||
);
|
||||
Tokenizer tokenizer = new WhitespaceTokenizer();
|
||||
tokenizer.setReader(new StringReader("a b c"));
|
||||
assertTokenStreamContents(tokenFilterFactory.create(tokenizer), new String[]{"1", "2", "2"});
|
||||
}
|
||||
}
|
|
@ -636,7 +636,10 @@
|
|||
language: dutch
|
||||
my_stemmer_override:
|
||||
type: stemmer_override
|
||||
rules: ["zoeken => override"]
|
||||
rules: [
|
||||
"zoeken => override",
|
||||
"foo, bar => baz"
|
||||
]
|
||||
- do:
|
||||
indices.analyze:
|
||||
index: test
|
||||
|
@ -647,6 +650,18 @@
|
|||
- length: { tokens: 1 }
|
||||
- match: { tokens.0.token: override }
|
||||
|
||||
# Test rule with multiple tokens on LHS:
|
||||
- do:
|
||||
indices.analyze:
|
||||
index: test
|
||||
body:
|
||||
text: foo bar
|
||||
tokenizer: whitespace
|
||||
filter: [my_stemmer_override]
|
||||
- length: { tokens: 2 }
|
||||
- match: { tokens.0.token: baz }
|
||||
- match: { tokens.1.token: baz }
|
||||
|
||||
---
|
||||
"decompounder":
|
||||
- do:
|
||||
|
|
Loading…
Reference in New Issue