Analysis enhancement - add preserve_original setting in ngram-token-filter (#55432) (#56100)

Authored-by: Amit Khandelwal <amitmbm87@gmail.com>
This commit is contained in:
markharwood 2020-05-04 11:31:28 +01:00 committed by GitHub
parent c65f828cb7
commit e197b6c45b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 74 additions and 3 deletions

View File

@ -185,6 +185,10 @@ Maximum length of characters in a gram. Defaults to `2`.
(Optional, integer)
Minimum length of characters in a gram. Defaults to `1`.
`preserve_original`::
(Optional, boolean)
Emits original token when set to `true`. Defaults to `false`.
You can use the <<index-max-ngram-diff,`index.max_ngram_diff`>> index-level
setting to control the maximum allowed difference between the `max_gram` and
`min_gram` values.

View File

@ -37,8 +37,9 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
= new DeprecationLogger(LogManager.getLogger(NGramTokenFilterFactory.class));
private final int minGram;
private final int maxGram;
private final boolean preserveOriginal;
private static final String PRESERVE_ORIG_KEY = "preserve_original";
NGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings);
@ -58,12 +59,12 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
+ "expected difference must be less than or equal to: [" + maxAllowedNgramDiff + "]");
}
}
preserveOriginal = settings.getAsBoolean(PRESERVE_ORIG_KEY, false);
}
@Override
public TokenStream create(TokenStream tokenStream) {
    // Wrap the incoming stream in Lucene's NGramTokenFilter, forwarding the
    // configured min/max gram sizes. preserveOriginal (from the
    // "preserve_original" setting) makes the filter also emit the unmodified
    // input token in addition to its n-grams.
    return new NGramTokenFilter(tokenStream, minGram, maxGram, preserveOriginal);
}
@Override

View File

@ -0,0 +1,66 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.analysis.AnalysisTestsHelper;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
import java.io.StringReader;
public class NGramTokenFilterFactoryTests extends ESTokenStreamTestCase {

    /**
     * Builds the "my_ngram" token filter from {@code settings}, runs it over
     * {@code source} tokenized with a {@link StandardTokenizer}, and asserts the
     * produced terms equal {@code expected}.
     */
    private void assertNGramTokens(Settings settings, String source, String[] expected) throws IOException {
        ESTestCase.TestAnalysis analysis =
            AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
        TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_ngram");
        Tokenizer tokenizer = new StandardTokenizer();
        tokenizer.setReader(new StringReader(source));
        assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
    }

    /** Default settings: only the 1- and 2-grams of "foo" are emitted. */
    public void testDefault() throws IOException {
        Settings settings = Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .put("index.analysis.filter.my_ngram.type", "ngram")
            .build();
        assertNGramTokens(settings, "foo", new String[] { "f", "fo", "o", "oo", "o" });
    }

    /** With preserve_original=true the original token "foo" is emitted in addition to its n-grams. */
    public void testPreserveOriginal() throws IOException {
        Settings settings = Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .put("index.analysis.filter.my_ngram.type", "ngram")
            .put("index.analysis.filter.my_ngram.preserve_original", true)
            .build();
        assertNGramTokens(settings, "foo", new String[] { "f", "fo", "o", "oo", "o", "foo" });
    }
}