mirror of https://github.com/apache/lucene.git
LUCENE-7287: add UkrainianMorfologikAnalyzer, a dictionary-based analyzer for the Ukrainian language
parent 113afcf024
commit 4a71e03a32
@@ -19,6 +19,10 @@ New Features
   recording key/values from a provided iterable based on when the
   commit actually takes place (Mike McCandless)

* LUCENE-7287: UkrainianMorfologikAnalyzer is a new dictionary-based
  analyzer for the Ukrainian language (Andriy Rysin via Mike
  McCandless)

Improvements

* LUCENE-7323: Compound file writing now verifies the incoming
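Not part of the patch itself, but for context: a minimal sketch of consuming the new analyzer through the standard Analyzer/TokenStream consumer workflow. The demo class name, the field name "body", and the sample sentence are illustrative assumptions, not code from this commit.

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.uk.UkrainianMorfologikAnalyzer;

public class UkrainianAnalyzerDemo {
  public static void main(String[] args) throws IOException {
    try (UkrainianMorfologikAnalyzer analyzer = new UkrainianMorfologikAnalyzer();
         TokenStream ts = analyzer.tokenStream("body", "Ця п'єса у свою чергу рухається по колу.")) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        System.out.println(term.toString()); // dictionary lemmas, lower-cased, stopwords removed
      }
      ts.end();
    }
  }
}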
@@ -0,0 +1,154 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.analysis.uk;


import java.io.IOException;
import java.io.Reader;
import java.nio.charset.StandardCharsets;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.charfilter.MappingCharFilter;
import org.apache.lucene.analysis.charfilter.NormalizeCharMap;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.morfologik.MorfologikFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.util.IOUtils;

import morfologik.stemming.Dictionary;

/**
 * A dictionary-based {@link Analyzer} for Ukrainian.
 */
public final class UkrainianMorfologikAnalyzer extends StopwordAnalyzerBase {
  private final CharArraySet stemExclusionSet;

  /** File containing default Ukrainian stopwords. */
  public final static String DEFAULT_STOPWORD_FILE = "/org/apache/lucene/analysis/uk/stopwords.txt";

  /**
   * Returns an unmodifiable instance of the default stop words set.
   * @return default stop words set.
   */
  public static CharArraySet getDefaultStopSet() {
    return DefaultSetHolder.DEFAULT_STOP_SET;
  }

  /**
   * Atomically loads the DEFAULT_STOP_SET in a lazy fashion once the outer class
   * accesses the static final set the first time.
   */
  private static class DefaultSetHolder {
    static final CharArraySet DEFAULT_STOP_SET;

    static {
      try {
        DEFAULT_STOP_SET = WordlistLoader.getSnowballWordSet(IOUtils.getDecodingReader(SnowballFilter.class,
            DEFAULT_STOPWORD_FILE, StandardCharsets.UTF_8));
      } catch (IOException ex) {
        // default set should always be present as it is part of the
        // distribution (JAR)
        throw new RuntimeException("Unable to load default stopword set");
      }
    }
  }

  /**
   * Builds an analyzer with the default stop words: {@link #DEFAULT_STOPWORD_FILE}.
   */
  public UkrainianMorfologikAnalyzer() {
    this(DefaultSetHolder.DEFAULT_STOP_SET);
  }

  /**
   * Builds an analyzer with the given stop words.
   *
   * @param stopwords a stopword set
   */
  public UkrainianMorfologikAnalyzer(CharArraySet stopwords) {
    this(stopwords, CharArraySet.EMPTY_SET);
  }

  /**
   * Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
   * provided this analyzer will add a {@link SetKeywordMarkerFilter} before
   * stemming.
   *
   * @param stopwords a stopword set
   * @param stemExclusionSet a set of terms not to be stemmed
   */
  public UkrainianMorfologikAnalyzer(CharArraySet stopwords, CharArraySet stemExclusionSet) {
    super(stopwords);
    this.stemExclusionSet = CharArraySet.unmodifiableSet(CharArraySet.copy(stemExclusionSet));
  }

  @Override
  protected Reader initReader(String fieldName, Reader reader) {
    NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
    // U+2019 RIGHT SINGLE QUOTATION MARK and U+02BC MODIFIER LETTER APOSTROPHE
    // are mapped to the plain ASCII apostrophe used by the dictionary
    builder.add("\u2019", "'");
    builder.add("\u02BC", "'");
    // U+0301 COMBINING ACUTE ACCENT (stress mark) is removed
    builder.add("\u0301", "");
    NormalizeCharMap normMap = builder.build();

    reader = new MappingCharFilter(normMap, reader);
    return reader;
  }

  /**
   * Creates a
   * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
   * which tokenizes all the text in the provided {@link Reader}.
   *
   * @return A
   *         {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
   *         built from a {@link StandardTokenizer} filtered with
   *         {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
   *         {@link SetKeywordMarkerFilter} if a stem exclusion set is
   *         provided, and {@link MorfologikFilter} on the Ukrainian dictionary.
   */
  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer source = new StandardTokenizer();
    TokenStream result = new StandardFilter(source);
    result = new LowerCaseFilter(result);
    result = new StopFilter(result, stopwords);

    if (stemExclusionSet.isEmpty() == false) {
      result = new SetKeywordMarkerFilter(result, stemExclusionSet);
    }

    result = new MorfologikFilter(result, getDictionary());
    return new TokenStreamComponents(source, result);
  }

  private static Dictionary getDictionary() {
    try {
      return Dictionary.read(UkrainianMorfologikAnalyzer.class.getResource("/org/apache/lucene/analysis/uk/ukrainian.dict"));
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
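Again not part of the commit: a short sketch of the stem-exclusion constructor defined above. Terms in the exclusion set are marked by SetKeywordMarkerFilter and therefore pass through MorfologikFilter unstemmed; the sample word and class name are arbitrary illustrations.

import java.util.Arrays;

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.uk.UkrainianMorfologikAnalyzer;

public class StemExclusionDemo {
  public static void main(String[] args) throws Exception {
    // Illustrative exclusion set: this surface form will not be replaced by its lemma.
    CharArraySet noStem = new CharArraySet(Arrays.asList("київ"), true);
    try (UkrainianMorfologikAnalyzer analyzer =
             new UkrainianMorfologikAnalyzer(UkrainianMorfologikAnalyzer.getDefaultStopSet(), noStem)) {
      // hand the analyzer to an IndexWriterConfig, or consume it as in the sketch above
    }
  }
}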
@@ -0,0 +1,21 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Analyzer for Ukrainian.
 */
package org.apache.lucene.analysis.uk;
File diff suppressed because it is too large
Binary file not shown.
@@ -0,0 +1,10 @@
#
# Dictionary properties.
#

fsa.dict.separator=+
fsa.dict.encoding=cp1251

fsa.dict.encoder=SUFFIX

fsa.dict.speller.ignore-diacritics=false
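This properties file is the morfologik dictionary metadata (the *.info companion to the binary *.dict automaton added above). As an aside, and assuming the usual morfologik-stemming API (Dictionary, DictionaryLookup, WordData) rather than anything shown in this commit, the dictionary can also be queried directly outside the analyzer; the inflected form being looked up is only an example.

import java.io.IOException;

import morfologik.stemming.Dictionary;
import morfologik.stemming.DictionaryLookup;
import morfologik.stemming.WordData;

public class UkrainianDictLookupDemo {
  public static void main(String[] args) throws IOException {
    // Dictionary.read() takes the URL of the *.dict file and locates the matching *.info metadata.
    Dictionary dict = Dictionary.read(
        UkrainianDictLookupDemo.class.getResource("/org/apache/lucene/analysis/uk/ukrainian.dict"));
    DictionaryLookup lookup = new DictionaryLookup(dict);
    for (WordData wd : lookup.lookup("черги")) {
      System.out.println(wd.getStem() + " " + wd.getTag()); // lemma and its morphological tag
    }
  }
}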
@@ -0,0 +1,65 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.analysis.uk;


import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;

/**
 * Test case for UkrainianMorfologikAnalyzer.
 */
public class TestUkrainianAnalyzer extends BaseTokenStreamTestCase {

  /** Check that UkrainianMorfologikAnalyzer doesn't discard any numbers */
  public void testDigitsInUkrainianCharset() throws IOException {
    UkrainianMorfologikAnalyzer ra = new UkrainianMorfologikAnalyzer();
    assertAnalyzesTo(ra, "text 1000", new String[] { "text", "1000" });
    ra.close();
  }

  public void testReusableTokenStream() throws Exception {
    Analyzer a = new UkrainianMorfologikAnalyzer();
    assertAnalyzesTo(a, "Ця п'єса у свою чергу рухається по колу.",
                     new String[] { "п'єса", "черга", "рухатися", "кола", "коло", "коло", "кіл", "кіл" });
    a.close();
  }

  public void testSpecialCharsTokenStream() throws Exception {
    Analyzer a = new UkrainianMorfologikAnalyzer();
    assertAnalyzesTo(a, "Ця пʼєса, у сво́ю чергу, рухається по колу.",
                     new String[] { "п'єса", "черга", "рухатися", "кола", "коло", "коло", "кіл", "кіл" });
    a.close();
  }

  public void testCapsTokenStream() throws Exception {
    Analyzer a = new UkrainianMorfologikAnalyzer();
    assertAnalyzesTo(a, "Цей Чайковський.",
                     new String[] { "чайковський" });
    a.close();
  }

  /** blast some random strings through the analyzer */
  public void testRandomStrings() throws Exception {
    Analyzer analyzer = new UkrainianMorfologikAnalyzer();
    checkRandomData(random(), analyzer, 1000*RANDOM_MULTIPLIER);
    analyzer.close();
  }
}