Expose `preserve_original` in `edge_ngram` token filter (#55766)

The Lucene `preserve_original` setting is currently not supported in the `edge_ngram`
token filter. This change adds it with a default value of `false`.

Closes #55767
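For illustration only (not part of this commit): a minimal sketch of what the underlying Lucene flag does, using the same "foo" input and the default min_gram=1 / max_gram=2 that this change keeps. The class name and the choice of WhitespaceTokenizer are illustrative assumptions, not taken from the PR.

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.StringReader;

// Illustrative sketch; tokenizer choice and class name are assumptions.
public class PreserveOriginalSketch {
    public static void main(String[] args) throws Exception {
        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("foo"));
        // The fourth constructor argument is Lucene's preserveOriginal flag;
        // with true, the full term "foo" is emitted after the grams "f" and "fo".
        TokenStream stream = new EdgeNGramTokenFilter(tokenizer, 1, 2, true);
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term);   // prints: f, fo, foo
        }
        stream.end();
        stream.close();
    }
}
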
Amit Khandelwal 2020-04-28 13:52:59 +05:30 committed by Christoph Büscher
parent a5cf4712e5
commit 126e4acca8
3 changed files with 75 additions and 3 deletions

@@ -136,7 +136,7 @@ The filter produces the following tokens:
==== Add to an analyzer
The following <<indices-create-index,create index API>> request uses the
`edge_ngram` filter to configure a new
<<analysis-custom-analyzer,custom analyzer>>.
[source,console]
@@ -173,6 +173,10 @@ See <<analysis-edgengram-tokenfilter-max-gram-limits>>.
(Optional, integer)
Minimum character length of a gram. Defaults to `1`.
`preserve_original`::
(Optional, boolean)
Emits the original token when set to `true`. Defaults to `false`.
`side`::
+
--
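As an aside (not part of this diff): a minimal sketch of how the documented options map onto index analysis settings in Java, mirroring the pattern used in the new unit test further down. The `my_edge_ngram` filter name, the explicit min_gram/max_gram values, and the class name are illustrative assumptions.

import org.elasticsearch.common.settings.Settings;

// Illustrative sketch; filter name and class name are assumptions.
public class EdgeNgramSettingsSketch {
    public static void main(String[] args) {
        // Index-level analysis settings for a custom edge_ngram token filter.
        Settings settings = Settings.builder()
            .put("index.analysis.filter.my_edge_ngram.type", "edge_ngram")
            .put("index.analysis.filter.my_edge_ngram.min_gram", 1)              // default 1
            .put("index.analysis.filter.my_edge_ngram.max_gram", 2)              // default 2
            .put("index.analysis.filter.my_edge_ngram.preserve_original", true)  // new option, defaults to false
            .build();

        // The factory reads the flag the same way, falling back to false when unset.
        boolean preserve = settings.getAsBoolean("index.analysis.filter.my_edge_ngram.preserve_original", false);
        System.out.println(preserve);   // true
    }
}
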

@@ -44,12 +44,15 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
    public static final int SIDE_FRONT = 1;
    public static final int SIDE_BACK = 2;
    private final int side;
    private final boolean preserveOriginal;
    private static final String PRESERVE_ORIG_KEY = "preserve_original";

    EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
        super(indexSettings, name, settings);
        this.minGram = settings.getAsInt("min_gram", 1);
        this.maxGram = settings.getAsInt("max_gram", 2);
        this.side = parseSide(settings.get("side", "front"));
        this.preserveOriginal = settings.getAsBoolean(PRESERVE_ORIG_KEY, false);
    }

    static int parseSide(String side) {
@@ -69,8 +72,7 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
            result = new ReverseStringFilter(result);
        }

        // TODO: Expose preserveOriginal
        result = new EdgeNGramTokenFilter(result, minGram, maxGram, false);
        result = new EdgeNGramTokenFilter(result, minGram, maxGram, preserveOriginal);

        // side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect
        if (side == SIDE_BACK) {
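For context only (not part of the commit): a sketch of the reverse-wrap trick the comment above refers to. Reversing the stream before and after a front edge n-gram filter yields grams taken from the back of the token, which is how side=back is emulated even though Lucene no longer supports it directly. The class name and WhitespaceTokenizer are illustrative assumptions.

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.StringReader;

// Illustrative sketch; tokenizer choice and class name are assumptions.
public class BackSideGramsSketch {
    public static void main(String[] args) throws Exception {
        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("foo"));
        TokenStream stream = new ReverseStringFilter(tokenizer);  // "foo" -> "oof"
        stream = new EdgeNGramTokenFilter(stream, 1, 2, false);   // "o", "oo"
        stream = new ReverseStringFilter(stream);                 // back to "o", "oo" = back grams of "foo"
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term);   // prints: o, oo  (the last 1 and 2 characters of "foo")
        }
        stream.end();
        stream.close();
    }
}
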

@@ -0,0 +1,66 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.analysis.common;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.analysis.AnalysisTestsHelper;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;

import java.io.IOException;
import java.io.StringReader;

public class EdgeNGramTokenFilterFactoryTests extends ESTokenStreamTestCase {

    public void testDefault() throws IOException {
        ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
            Settings.builder()
                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
                .put("index.analysis.filter.my_edge_ngram.type", "edge_ngram")
                .build(),
            new CommonAnalysisPlugin());
        TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_edge_ngram");
        String source = "foo";
        String[] expected = new String[]{"f", "fo"};
        Tokenizer tokenizer = new StandardTokenizer();
        tokenizer.setReader(new StringReader(source));
        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
    }

    public void testPreserveOriginal() throws IOException {
        ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
            Settings.builder()
                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
                .put("index.analysis.filter.my_edge_ngram.type", "edge_ngram")
                .put("index.analysis.filter.my_edge_ngram.preserve_original", true)
                .build(),
            new CommonAnalysisPlugin());
        TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_edge_ngram");
        String source = "foo";
        String[] expected = new String[]{"f", "fo", "foo"};
        Tokenizer tokenizer = new StandardTokenizer();
        tokenizer.setReader(new StringReader(source));
        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
    }
}