Move phonetic token filter to a plugin, closes #1594.

Shay Banon 2012-01-07 23:18:30 +02:00
parent aec5af3800
commit 3d51553cf2
8 changed files with 1 addition and 344 deletions

pom.xml

@@ -114,13 +114,6 @@
             <scope>compile</scope>
         </dependency>
-        <dependency>
-            <groupId>commons-codec</groupId>
-            <artifactId>commons-codec</artifactId>
-            <version>1.6</version>
-            <scope>compile</scope>
-        </dependency>
         <dependency>
             <groupId>joda-time</groupId>
             <artifactId>joda-time</artifactId>
@@ -312,7 +305,6 @@
         <include>net.sf.trove4j:trove4j</include>
         <include>org.elasticsearch:es-jsr166y</include>
         <include>org.elasticsearch:es-jsr166e</include>
-        <include>commons-codec:commons-codec</include>
         <include>org.mvel:mvel2</include>
         <include>org.codehaus.jackson:jackson-core-asl</include>
         <include>org.codehaus.jackson:jackson-smile</include>
@@ -338,10 +330,6 @@
             <pattern>jsr166e</pattern>
             <shadedPattern>org.elasticsearch.common.util.concurrent.jsr166e</shadedPattern>
         </relocation>
-        <relocation>
-            <pattern>org.apache.commons.codec</pattern>
-            <shadedPattern>org.elasticsearch.common.codec</shadedPattern>
-        </relocation>
         <relocation>
             <pattern>org.mvel2</pattern>
             <shadedPattern>org.elasticsearch.common.mvel2</shadedPattern>

AnalysisModule.java

@@ -31,7 +31,6 @@ import org.elasticsearch.common.settings.NoClassSettingsException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.analysis.compound.DictionaryCompoundWordTokenFilterFactory;
 import org.elasticsearch.index.analysis.compound.HyphenationCompoundWordTokenFilterFactory;
-import org.elasticsearch.index.analysis.phonetic.PhoneticTokenFilterFactory;
 import org.elasticsearch.indices.analysis.IndicesAnalysisService;
 
 import java.util.LinkedList;

@@ -452,7 +451,6 @@ public class AnalysisModule extends AbstractModule {
         tokenFiltersBindings.processTokenFilter("elision", ElisionTokenFilterFactory.class);
         tokenFiltersBindings.processTokenFilter("pattern_replace", PatternReplaceTokenFilterFactory.class);
-        tokenFiltersBindings.processTokenFilter("phonetic", PhoneticTokenFilterFactory.class);
         tokenFiltersBindings.processTokenFilter("dictionary_decompounder", DictionaryCompoundWordTokenFilterFactory.class);
         tokenFiltersBindings.processTokenFilter("hyphenation_decompounder", HyphenationCompoundWordTokenFilterFactory.class);

DoubleMetaphoneFilter.java (deleted)

@@ -1,111 +0,0 @@
/*
 * Licensed to ElasticSearch and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. ElasticSearch licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis.phonetic;

import org.apache.commons.codec.language.DoubleMetaphone;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

import java.io.IOException;
import java.util.LinkedList;

public final class DoubleMetaphoneFilter extends TokenFilter {

    private static final String TOKEN_TYPE = "DoubleMetaphone";

    private final LinkedList<State> remainingTokens = new LinkedList<State>();
    private final DoubleMetaphone encoder;
    private final boolean inject;
    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
    private final PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class);

    public DoubleMetaphoneFilter(TokenStream input, DoubleMetaphone encoder, boolean inject) {
        super(input);
        this.encoder = encoder;
        this.inject = inject;
    }

    @Override
    public boolean incrementToken() throws IOException {
        for (; ; ) {
            if (!remainingTokens.isEmpty()) {
                // clearAttributes(); // not currently necessary
                restoreState(remainingTokens.removeFirst());
                return true;
            }

            if (!input.incrementToken()) return false;

            int len = termAtt.length();
            if (len == 0) return true; // pass through zero length terms

            int firstAlternativeIncrement = inject ? 0 : posAtt.getPositionIncrement();

            String v = termAtt.toString();
            String primaryPhoneticValue = encoder.doubleMetaphone(v);
            String alternatePhoneticValue = encoder.doubleMetaphone(v, true);

            // a flag to lazily save state if needed... this avoids a save/restore when only
            // one token will be generated.
            boolean saveState = inject;

            if (primaryPhoneticValue != null && primaryPhoneticValue.length() > 0 && !primaryPhoneticValue.equals(v)) {
                if (saveState) {
                    remainingTokens.addLast(captureState());
                }
                posAtt.setPositionIncrement(firstAlternativeIncrement);
                firstAlternativeIncrement = 0;
                termAtt.setEmpty().append(primaryPhoneticValue);
                saveState = true;
            }

            if (alternatePhoneticValue != null && alternatePhoneticValue.length() > 0
                    && !alternatePhoneticValue.equals(primaryPhoneticValue)
                    && !primaryPhoneticValue.equals(v)) {
                if (saveState) {
                    remainingTokens.addLast(captureState());
                    saveState = false;
                }
                posAtt.setPositionIncrement(firstAlternativeIncrement);
                termAtt.setEmpty().append(alternatePhoneticValue);
                saveState = true;
            }

            // Just one token to return, so no need to capture/restore
            // any state, simply return it.
            if (remainingTokens.isEmpty()) {
                return true;
            }

            if (saveState) {
                remainingTokens.addLast(captureState());
            }
        }
    }

    @Override
    public void reset() throws IOException {
        input.reset();
        remainingTokens.clear();
    }
}
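
Note: a minimal, self-contained sketch of how this filter is driven in the Lucene 3.x line Elasticsearch shipped at the time. The example class and sample text are illustrative assumptions; the Lucene and commons-codec APIs used are the ones the filter itself imports.

// Illustrative sketch, not part of this commit.
import java.io.StringReader;

import org.apache.commons.codec.language.DoubleMetaphone;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.util.Version;
import org.elasticsearch.index.analysis.phonetic.DoubleMetaphoneFilter;

public class DoubleMetaphoneFilterExample {

    public static void main(String[] args) throws Exception {
        // inject=true: the original token is kept and the metaphone code(s)
        // are emitted at the same position (position increment 0).
        TokenStream ts = new DoubleMetaphoneFilter(
                new WhitespaceTokenizer(Version.LUCENE_35, new StringReader("Schmidt Smith")),
                new DoubleMetaphone(), true);
        CharTermAttribute term = ts.getAttribute(CharTermAttribute.class);
        PositionIncrementAttribute posIncr = ts.getAttribute(PositionIncrementAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term.toString() + " (posIncr=" + posIncr.getPositionIncrement() + ")");
        }
    }
}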

PhoneticFilter.java (deleted)

@@ -1,100 +0,0 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis.phonetic;

import org.apache.commons.codec.Encoder;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

import java.io.IOException;

/**
 * Create tokens for phonetic matches. See:
 * http://jakarta.apache.org/commons/codec/api-release/org/apache/commons/codec/language/package-summary.html
 */
// LUCENE MONITOR
public class PhoneticFilter extends TokenFilter {

    protected boolean inject = true;
    protected Encoder encoder = null;
    protected String name = null;
    protected State save = null;

    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
    private final PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class);

    public PhoneticFilter(TokenStream in, Encoder encoder, String name, boolean inject) {
        super(in);
        this.encoder = encoder;
        this.name = name;
        this.inject = inject;
    }

    @Override
    public boolean incrementToken() throws IOException {
        if (save != null) {
            // clearAttributes(); // not currently necessary
            restoreState(save);
            save = null;
            return true;
        }

        if (!input.incrementToken()) return false;

        // pass through zero-length terms
        if (termAtt.length() == 0) return true;

        String value = termAtt.toString();
        String phonetic = null;
        try {
            String v = encoder.encode(value).toString();
            if (v.length() > 0 && !value.equals(v)) phonetic = v;
        } catch (Exception ignored) {
        } // just use the direct text

        if (phonetic == null) return true;

        if (!inject) {
            // just modify this token
            termAtt.setEmpty().append(phonetic);
            return true;
        }

        // We need to return both the original and the phonetic tokens.
        // to avoid a orig=captureState() change_to_phonetic() saved=captureState() restoreState(orig)
        // we return the phonetic alternative first

        int origOffset = posAtt.getPositionIncrement();
        posAtt.setPositionIncrement(0);
        save = captureState();

        posAtt.setPositionIncrement(origOffset);
        termAtt.setEmpty().append(phonetic);
        return true;
    }

    @Override
    public void reset() throws IOException {
        input.reset();
        save = null;
    }
}
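
Note: the same kind of sketch for the generic filter, here with a commons-codec Metaphone encoder and inject=false, which is the behaviour the factory's replace=true setting selects. The example class and sample text are illustrative assumptions, not part of the commit.

// Illustrative sketch, not part of this commit.
import java.io.StringReader;

import org.apache.commons.codec.language.Metaphone;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;
import org.elasticsearch.index.analysis.phonetic.PhoneticFilter;

public class PhoneticFilterExample {

    public static void main(String[] args) throws Exception {
        // inject=false: each term is rewritten in place with its phonetic code,
        // so the original spelling does not survive in the token stream.
        TokenStream ts = new PhoneticFilter(
                new WhitespaceTokenizer(Version.LUCENE_35, new StringReader("phonetic token filter")),
                new Metaphone(), "phonetic", false);
        CharTermAttribute term = ts.getAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term.toString());
        }
    }
}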

PhoneticTokenFilterFactory.java (deleted)

@@ -1,104 +0,0 @@
/*
 * Licensed to Elastic Search and Shay Banon under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. Elastic Search licenses this
 * file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.analysis.phonetic;

import org.apache.commons.codec.Encoder;
import org.apache.commons.codec.language.*;
import org.apache.commons.codec.language.bm.BeiderMorseEncoder;
import org.apache.commons.codec.language.bm.NameType;
import org.apache.commons.codec.language.bm.RuleType;
import org.apache.lucene.analysis.TokenStream;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
import org.elasticsearch.index.analysis.AnalysisSettingsRequired;
import org.elasticsearch.index.settings.IndexSettings;

/**
 *
 */
@AnalysisSettingsRequired
public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory {

    private final Encoder encoder;

    private final boolean replace;

    @Inject
    public PhoneticTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
        super(index, indexSettings, name, settings);
        this.replace = settings.getAsBoolean("replace", true);
        String encoder = settings.get("encoder");
        if (encoder == null) {
            throw new ElasticSearchIllegalArgumentException("encoder must be set on phonetic token filter");
        }
        if ("metaphone".equalsIgnoreCase(encoder)) {
            this.encoder = new Metaphone();
        } else if ("soundex".equalsIgnoreCase(encoder)) {
            this.encoder = new Soundex();
        } else if ("caverphone1".equalsIgnoreCase(encoder)) {
            this.encoder = new Caverphone1();
        } else if ("caverphone2".equalsIgnoreCase(encoder)) {
            this.encoder = new Caverphone2();
        } else if ("caverphone".equalsIgnoreCase(encoder)) {
            this.encoder = new Caverphone2();
        } else if ("refined_soundex".equalsIgnoreCase(encoder) || "refinedSoundex".equalsIgnoreCase(encoder)) {
            this.encoder = new RefinedSoundex();
        } else if ("cologne".equalsIgnoreCase(encoder)) {
            this.encoder = new ColognePhonetic();
        } else if ("double_metaphone".equalsIgnoreCase(encoder) || "doubleMetaphone".equalsIgnoreCase(encoder)) {
            DoubleMetaphone doubleMetaphone = new DoubleMetaphone();
            doubleMetaphone.setMaxCodeLen(settings.getAsInt("max_code_len", doubleMetaphone.getMaxCodeLen()));
            this.encoder = doubleMetaphone;
        } else if ("bm".equalsIgnoreCase(encoder) || "beider_morse".equalsIgnoreCase(encoder)) {
            BeiderMorseEncoder bm = new BeiderMorseEncoder();
            String ruleType = settings.get("rule_type", "approx");
            if ("approx".equalsIgnoreCase(ruleType)) {
                bm.setRuleType(RuleType.APPROX);
            } else if ("exact".equalsIgnoreCase(ruleType)) {
                bm.setRuleType(RuleType.EXACT);
            } else {
                throw new ElasticSearchIllegalArgumentException("No matching rule type [" + ruleType + "] for beider morse encoder");
            }
            String nameType = settings.get("name_type", "generic");
            if ("GENERIC".equalsIgnoreCase(nameType)) {
                bm.setNameType(NameType.GENERIC);
            } else if ("ASHKENAZI".equalsIgnoreCase(nameType)) {
                bm.setNameType(NameType.ASHKENAZI);
            } else if ("SEPHARDIC".equalsIgnoreCase(nameType)) {
                bm.setNameType(NameType.SEPHARDIC);
            }
            this.encoder = bm;
        } else {
            throw new ElasticSearchIllegalArgumentException("unknown encoder [" + encoder + "] for phonetic token filter");
        }
    }

    @Override
    public TokenStream create(TokenStream tokenStream) {
        if (encoder instanceof DoubleMetaphone) {
            return new DoubleMetaphoneFilter(tokenStream, (DoubleMetaphone) encoder, !replace);
        }
        return new PhoneticFilter(tokenStream, encoder, name(), !replace);
    }
}
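
Note: the factory above is essentially a switch over commons-codec 1.6 encoders keyed by the "encoder" setting. A small standalone sketch of the main choices, using only commons-codec calls the factory itself relies on; the sample term is an illustrative assumption and no specific output is asserted.

// Illustrative sketch, not part of this commit.
import org.apache.commons.codec.language.ColognePhonetic;
import org.apache.commons.codec.language.DoubleMetaphone;
import org.apache.commons.codec.language.Metaphone;
import org.apache.commons.codec.language.RefinedSoundex;
import org.apache.commons.codec.language.Soundex;

public class PhoneticEncoderComparison {

    public static void main(String[] args) {
        String term = "Meier";
        // Each line corresponds to one value of the filter's "encoder" setting.
        System.out.println("metaphone        -> " + new Metaphone().metaphone(term));
        System.out.println("double_metaphone -> " + new DoubleMetaphone().doubleMetaphone(term));
        System.out.println("soundex          -> " + new Soundex().soundex(term));
        System.out.println("refined_soundex  -> " + new RefinedSoundex().soundex(term));
        System.out.println("cologne          -> " + new ColognePhonetic().colognePhonetic(term));
    }
}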

AnalysisModuleTests.java

@@ -32,11 +32,10 @@ import org.elasticsearch.env.EnvironmentModule;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexNameModule;
 import org.elasticsearch.index.analysis.*;
-import org.elasticsearch.test.unit.index.analysis.filter1.MyFilterTokenFilterFactory;
-import org.elasticsearch.index.analysis.phonetic.PhoneticTokenFilterFactory;
 import org.elasticsearch.index.settings.IndexSettingsModule;
 import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
 import org.elasticsearch.indices.analysis.IndicesAnalysisService;
+import org.elasticsearch.test.unit.index.analysis.filter1.MyFilterTokenFilterFactory;
 import org.hamcrest.MatcherAssert;
 import org.testng.annotations.Test;
@@ -109,12 +108,6 @@ public class AnalysisModuleTests {
         analyzer = analysisService.analyzer("alias1").analyzer();
         assertThat(analyzer, instanceOf(StandardAnalyzer.class));
 
-        // check phonetic
-        analyzer = analysisService.analyzer("custom3").analyzer();
-        assertThat(analyzer, instanceOf(CustomAnalyzer.class));
-        CustomAnalyzer custom3 = (CustomAnalyzer) analyzer;
-        assertThat(custom3.tokenFilters()[0], instanceOf(PhoneticTokenFilterFactory.class));
-
         // check custom class name (my)
         analyzer = analysisService.analyzer("custom4").analyzer();
         assertThat(analyzer, instanceOf(CustomAnalyzer.class));

Analysis test settings (JSON)

@ -53,10 +53,6 @@
"tokenizer":"standard", "tokenizer":"standard",
"char_filter":["html_strip", "my_html"] "char_filter":["html_strip", "my_html"]
}, },
"custom3":{
"tokenizer":"standard",
"filter":["metaphone"]
},
"custom4":{ "custom4":{
"tokenizer":"standard", "tokenizer":"standard",
"filter":["my"] "filter":["my"]

Analysis test settings (YAML)

@@ -38,9 +38,6 @@ index :
     custom2 :
         tokenizer : standard
         char_filter : [html_strip, my_html]
-    custom3 :
-        tokenizer : standard
-        filter : [metaphone]
     custom4 :
         tokenizer : standard
         filter : [my]