initial commit

Shay Banon 2012-01-07 23:06:50 +02:00
commit b33896fe77
12 changed files with 618 additions and 0 deletions

7
.gitignore vendored Normal file

@@ -0,0 +1,7 @@
/data
/work
/logs
/.idea
/target
.DS_Store
*.iml

12
README.md Normal file

@@ -0,0 +1,12 @@
Phonetic Analysis for ElasticSearch
===================================
The Phonetic Analysis plugin integrates phonetic token filter analysis with ElasticSearch; a configuration sketch follows the compatibility table below.
In order to install the plugin, run: `bin/plugin -install elasticsearch/elasticsearch-analysis-phonetic/1.1.0`.
---------------------------------------------
| Phonetic Analysis Plugin | ElasticSearch  |
---------------------------------------------
| master                   | 0.18 -> master |
---------------------------------------------
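
As a usage sketch (not part of this commit), the token filter the plugin registers as `phonetic` can be wired into the index analysis settings. The keys below (`encoder`, `replace`) are the ones read by `PhoneticTokenFilterFactory`; the filter name `my_metaphone` and analyzer name `phonetic_analyzer` are made up for illustration, and the settings are built with the ES Java API rather than YAML:

```java
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class PhoneticSettingsSketch {
    public static void main(String[] args) {
        // Define a custom token filter of type "phonetic" and use it in a custom analyzer.
        Settings settings = ImmutableSettings.settingsBuilder()
                .put("index.analysis.filter.my_metaphone.type", "phonetic")
                .put("index.analysis.filter.my_metaphone.encoder", "metaphone")
                .put("index.analysis.filter.my_metaphone.replace", "false")
                .put("index.analysis.analyzer.phonetic_analyzer.tokenizer", "standard")
                .put("index.analysis.analyzer.phonetic_analyzer.filter.0", "lowercase")
                .put("index.analysis.analyzer.phonetic_analyzer.filter.1", "my_metaphone")
                .build();
        System.out.println(settings.getAsMap());
    }
}
```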

137
pom.xml Normal file

@@ -0,0 +1,137 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<name>elasticsearch-analysis-phonetic</name>
<modelVersion>4.0.0</modelVersion>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch-analysis-phonetic</artifactId>
<version>1.0.0-SNAPSHOT</version>
<packaging>jar</packaging>
<description>Phonetic Analysis for ElasticSearch</description>
<inceptionYear>2009</inceptionYear>
<licenses>
<license>
<name>The Apache Software License, Version 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:git@github.com:elasticsearch/elasticsearch-analysis-phonetic.git</connection>
<developerConnection>scm:git:git@github.com:elasticsearch/elasticsearch-analysis-phonetic.git
</developerConnection>
<url>http://github.com/elasticsearch/elasticsearch-analysis-phonetic</url>
</scm>
<parent>
<groupId>org.sonatype.oss</groupId>
<artifactId>oss-parent</artifactId>
<version>7</version>
</parent>
<properties>
<elasticsearch.version>0.18.5</elasticsearch.version>
</properties>
<repositories>
</repositories>
<dependencies>
<dependency>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch</artifactId>
<version>${elasticsearch.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.6</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>1.2.16</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>6.3.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-core</artifactId>
<version>1.3.RC2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-library</artifactId>
<version>1.3.RC2</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>2.3.2</version>
<configuration>
<source>1.6</source>
<target>1.6</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.11</version>
<configuration>
<includes>
<include>**/*Tests.java</include>
</includes>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>2.1.2</version>
<executions>
<execution>
<id>attach-sources</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<outputDirectory>${project.build.directory}/releases/</outputDirectory>
<descriptors>
<descriptor>${basedir}/src/main/assemblies/plugin.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

26
src/main/assemblies/plugin.xml Normal file

@@ -0,0 +1,26 @@
<?xml version="1.0"?>
<assembly>
<id></id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<includes>
<include>commons-codec:commons-codec</include>
</includes>
</dependencySet>
</dependencySets>
</assembly>

111
src/main/java/org/elasticsearch/index/analysis/DoubleMetaphoneFilter.java Normal file

@@ -0,0 +1,111 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.commons.codec.language.DoubleMetaphone;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import java.io.IOException;
import java.util.LinkedList;
public final class DoubleMetaphoneFilter extends TokenFilter {
private static final String TOKEN_TYPE = "DoubleMetaphone";
private final LinkedList<State> remainingTokens = new LinkedList<State>();
private final DoubleMetaphone encoder;
private final boolean inject;
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class);
public DoubleMetaphoneFilter(TokenStream input, DoubleMetaphone encoder, boolean inject) {
super(input);
this.encoder = encoder;
this.inject = inject;
}
@Override
public boolean incrementToken() throws IOException {
for (; ; ) {
if (!remainingTokens.isEmpty()) {
// clearAttributes(); // not currently necessary
restoreState(remainingTokens.removeFirst());
return true;
}
if (!input.incrementToken()) return false;
int len = termAtt.length();
if (len == 0) return true; // pass through zero length terms
int firstAlternativeIncrement = inject ? 0 : posAtt.getPositionIncrement();
String v = termAtt.toString();
String primaryPhoneticValue = encoder.doubleMetaphone(v);
String alternatePhoneticValue = encoder.doubleMetaphone(v, true);
// a flag to lazily save state if needed... this avoids a save/restore when only
// one token will be generated.
boolean saveState = inject;
if (primaryPhoneticValue != null && primaryPhoneticValue.length() > 0 && !primaryPhoneticValue.equals(v)) {
if (saveState) {
remainingTokens.addLast(captureState());
}
posAtt.setPositionIncrement(firstAlternativeIncrement);
firstAlternativeIncrement = 0;
termAtt.setEmpty().append(primaryPhoneticValue);
saveState = true;
}
if (alternatePhoneticValue != null && alternatePhoneticValue.length() > 0
&& !alternatePhoneticValue.equals(primaryPhoneticValue)
&& !primaryPhoneticValue.equals(v)) {
if (saveState) {
remainingTokens.addLast(captureState());
saveState = false;
}
posAtt.setPositionIncrement(firstAlternativeIncrement);
termAtt.setEmpty().append(alternatePhoneticValue);
saveState = true;
}
// Just one token to return, so no need to capture/restore
// any state, simply return it.
if (remainingTokens.isEmpty()) {
return true;
}
if (saveState) {
remainingTokens.addLast(captureState());
}
}
}
@Override
public void reset() throws IOException {
input.reset();
remainingTokens.clear();
}
}
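
A standalone sketch (not part of this commit) of how this filter behaves when wrapped around a Lucene tokenizer. It assumes Lucene 3.5 on the classpath (adjust `Version.LUCENE_35` if a different 3.x release is used); the input string is arbitrary:

```java
import java.io.IOException;
import java.io.StringReader;

import org.apache.commons.codec.language.DoubleMetaphone;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.util.Version;
import org.elasticsearch.index.analysis.DoubleMetaphoneFilter;

public class DoubleMetaphoneFilterSketch {
    public static void main(String[] args) throws IOException {
        TokenStream ts = new DoubleMetaphoneFilter(
                new WhitespaceTokenizer(Version.LUCENE_35, new StringReader("Schmidt")),
                new DoubleMetaphone(), true /* inject: keep the original token as well */);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        PositionIncrementAttribute pos = ts.addAttribute(PositionIncrementAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            // With inject=true this prints the original "Schmidt" first, then its
            // double-metaphone encodings (typically a primary and an alternate)
            // at position increment 0; with inject=false the original is replaced.
            System.out.println(term.toString() + " (posInc=" + pos.getPositionIncrement() + ")");
        }
    }
}
```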

30
src/main/java/org/elasticsearch/index/analysis/PhoneticAnalysisBinderProcessor.java Normal file

@@ -0,0 +1,30 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
/**
*/
public class PhoneticAnalysisBinderProcessor extends AnalysisModule.AnalysisBinderProcessor {
@Override
public void processTokenFilters(TokenFiltersBindings tokenFiltersBindings) {
tokenFiltersBindings.processTokenFilter("phonetic", PhoneticTokenFilterFactory.class);
}
}

100
src/main/java/org/elasticsearch/index/analysis/PhoneticFilter.java Normal file

@@ -0,0 +1,100 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.commons.codec.Encoder;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import java.io.IOException;
/**
* Create tokens for phonetic matches. See:
* http://jakarta.apache.org/commons/codec/api-release/org/apache/commons/codec/language/package-summary.html
*/
// LUCENE MONITOR - No need for it in Lucene 3.6
public class PhoneticFilter extends TokenFilter {
protected boolean inject = true;
protected Encoder encoder = null;
protected String name = null;
protected State save = null;
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class);
public PhoneticFilter(TokenStream in, Encoder encoder, String name, boolean inject) {
super(in);
this.encoder = encoder;
this.name = name;
this.inject = inject;
}
@Override
public boolean incrementToken() throws IOException {
if (save != null) {
// clearAttributes(); // not currently necessary
restoreState(save);
save = null;
return true;
}
if (!input.incrementToken()) return false;
// pass through zero-length terms
if (termAtt.length() == 0) return true;
String value = termAtt.toString();
String phonetic = null;
try {
String v = encoder.encode(value).toString();
if (v.length() > 0 && !value.equals(v)) phonetic = v;
} catch (Exception ignored) {
} // just use the direct text
if (phonetic == null) return true;
if (!inject) {
// just modify this token
termAtt.setEmpty().append(phonetic);
return true;
}
// We need to return both the original and the phonetic tokens.
// To avoid an orig=captureState(); change_to_phonetic(); saved=captureState(); restoreState(orig)
// sequence, we return the phonetic alternative first.
int origOffset = posAtt.getPositionIncrement();
posAtt.setPositionIncrement(0);
save = captureState();
posAtt.setPositionIncrement(origOffset);
termAtt.setEmpty().append(phonetic);
return true;
}
@Override
public void reset() throws IOException {
input.reset();
save = null;
}
}
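
A similar sketch (not part of this commit) for this generic filter, here with the commons-codec Metaphone encoder; same Lucene 3.5 assumption as in the DoubleMetaphoneFilter example above:

```java
import java.io.IOException;
import java.io.StringReader;

import org.apache.commons.codec.language.Metaphone;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;
import org.elasticsearch.index.analysis.PhoneticFilter;

public class PhoneticFilterSketch {
    public static void main(String[] args) throws IOException {
        // With inject=true each term is followed by... actually preceded: the phonetic
        // form is emitted first at the original position, then the original term at posInc 0.
        TokenStream ts = new PhoneticFilter(
                new WhitespaceTokenizer(Version.LUCENE_35, new StringReader("quick brown fox")),
                new Metaphone(), "metaphone", true);
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term.toString());
        }
    }
}
```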

102
src/main/java/org/elasticsearch/index/analysis/PhoneticTokenFilterFactory.java Normal file

@@ -0,0 +1,102 @@
/*
* Licensed to Elastic Search and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Elastic Search licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.apache.commons.codec.Encoder;
import org.apache.commons.codec.language.*;
import org.apache.commons.codec.language.bm.BeiderMorseEncoder;
import org.apache.commons.codec.language.bm.NameType;
import org.apache.commons.codec.language.bm.RuleType;
import org.apache.lucene.analysis.TokenStream;
import org.elasticsearch.ElasticSearchIllegalArgumentException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.assistedinject.Assisted;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings;
/**
*
*/
@AnalysisSettingsRequired
public class PhoneticTokenFilterFactory extends AbstractTokenFilterFactory {
private final Encoder encoder;
private final boolean replace;
@Inject
public PhoneticTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, @Assisted String name, @Assisted Settings settings) {
super(index, indexSettings, name, settings);
this.replace = settings.getAsBoolean("replace", true);
String encoder = settings.get("encoder");
if (encoder == null) {
throw new ElasticSearchIllegalArgumentException("encoder must be set on phonetic token filter");
}
if ("metaphone".equalsIgnoreCase(encoder)) {
this.encoder = new Metaphone();
} else if ("soundex".equalsIgnoreCase(encoder)) {
this.encoder = new Soundex();
} else if ("caverphone1".equalsIgnoreCase(encoder)) {
this.encoder = new Caverphone1();
} else if ("caverphone2".equalsIgnoreCase(encoder)) {
this.encoder = new Caverphone2();
} else if ("caverphone".equalsIgnoreCase(encoder)) {
this.encoder = new Caverphone2();
} else if ("refined_soundex".equalsIgnoreCase(encoder) || "refinedSoundex".equalsIgnoreCase(encoder)) {
this.encoder = new RefinedSoundex();
} else if ("cologne".equalsIgnoreCase(encoder)) {
this.encoder = new ColognePhonetic();
} else if ("double_metaphone".equalsIgnoreCase(encoder) || "doubleMetaphone".equalsIgnoreCase(encoder)) {
DoubleMetaphone doubleMetaphone = new DoubleMetaphone();
doubleMetaphone.setMaxCodeLen(settings.getAsInt("max_code_len", doubleMetaphone.getMaxCodeLen()));
this.encoder = doubleMetaphone;
} else if ("bm".equalsIgnoreCase(encoder) || "beider_morse".equalsIgnoreCase(encoder)) {
BeiderMorseEncoder bm = new BeiderMorseEncoder();
String ruleType = settings.get("rule_type", "approx");
if ("approx".equalsIgnoreCase(ruleType)) {
bm.setRuleType(RuleType.APPROX);
} else if ("exact".equalsIgnoreCase(ruleType)) {
bm.setRuleType(RuleType.EXACT);
} else {
throw new ElasticSearchIllegalArgumentException("No matching rule type [" + ruleType + "] for beider morse encoder");
}
String nameType = settings.get("name_type", "generic");
if ("GENERIC".equalsIgnoreCase(nameType)) {
bm.setNameType(NameType.GENERIC);
} else if ("ASHKENAZI".equalsIgnoreCase(nameType)) {
bm.setNameType(NameType.ASHKENAZI);
} else if ("SEPHARDIC".equalsIgnoreCase(nameType)) {
bm.setNameType(NameType.SEPHARDIC);
}
this.encoder = bm;
} else {
throw new ElasticSearchIllegalArgumentException("unknown encoder [" + encoder + "] for phonetic token filter");
}
}
@Override
public TokenStream create(TokenStream tokenStream) {
if (encoder instanceof DoubleMetaphone) {
return new DoubleMetaphoneFilter(tokenStream, (DoubleMetaphone) encoder, !replace);
}
return new org.elasticsearch.index.analysis.PhoneticFilter(tokenStream, encoder, name(), !replace);
}
}
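
To summarise the options parsed above, a sketch (not part of this commit) of the filter-level settings and of creating the factory by hand. In a running node the AnalysisModule builds the factory from index settings instead; the index name `test` and filter name `my_bm` are made up, and the Lucene 3.5 assumption from the earlier examples applies:

```java
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.PhoneticTokenFilterFactory;

public class PhoneticFactorySketch {
    public static void main(String[] args) {
        // Filter-level settings read by the factory: "encoder" (required), "replace",
        // "max_code_len" (double_metaphone only), "rule_type" and "name_type" (bm only).
        Settings filterSettings = ImmutableSettings.settingsBuilder()
                .put("encoder", "beider_morse")
                .put("rule_type", "approx")
                .put("name_type", "ashkenazi")
                .put("replace", "false")
                .build();
        // Constructed directly here only to show which TokenFilter it creates;
        // normally the AnalysisModule does this from the index settings.
        PhoneticTokenFilterFactory factory = new PhoneticTokenFilterFactory(
                new Index("test"), ImmutableSettings.Builder.EMPTY_SETTINGS, "my_bm", filterSettings);
        TokenStream filtered = factory.create(
                new WhitespaceTokenizer(Version.LUCENE_35, new StringReader("abraham")));
        System.out.println(filtered.getClass().getName()); // org.elasticsearch.index.analysis.PhoneticFilter
    }
}
```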

49
src/main/java/org/elasticsearch/plugin/analysis/AnalysisPhoneticPlugin.java Normal file

@@ -0,0 +1,49 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.analysis;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.index.analysis.AnalysisModule;
import org.elasticsearch.index.analysis.PhoneticAnalysisBinderProcessor;
import org.elasticsearch.plugins.AbstractPlugin;
/**
*/
public class AnalysisPhoneticPlugin extends AbstractPlugin {
@Override
public String name() {
return "analysis-phonetic";
}
@Override
public String description() {
return "Phonetic analysis support";
}
@Override
public void processModule(Module module) {
if (module instanceof AnalysisModule) {
AnalysisModule analysisModule = (AnalysisModule) module;
analysisModule.addProcessor(new PhoneticAnalysisBinderProcessor());
}
}
}

1
src/main/resources/es-plugin.properties Normal file

@@ -0,0 +1 @@
plugin=org.elasticsearch.plugin.analysis.AnalysisPhoneticPlugin

38
src/test/java/org/elasticsearch/index/analysis/SimplePhoneticAnalysisTests.java Normal file

@@ -0,0 +1,38 @@
package org.elasticsearch.index.analysis;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNameModule;
import org.elasticsearch.index.settings.IndexSettingsModule;
import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
import org.elasticsearch.indices.analysis.IndicesAnalysisService;
import org.testng.annotations.Test;
import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
/**
*/
public class SimplePhoneticAnalysisTests {
@Test
public void testDefaultsPhoneticAnalysis() {
Index index = new Index("test");
Injector parentInjector = new ModulesBuilder().add(new SettingsModule(EMPTY_SETTINGS), new EnvironmentModule(new Environment(EMPTY_SETTINGS)), new IndicesAnalysisModule()).createInjector();
Injector injector = new ModulesBuilder().add(
new IndexSettingsModule(index, EMPTY_SETTINGS),
new IndexNameModule(index),
new AnalysisModule(EMPTY_SETTINGS, parentInjector.getInstance(IndicesAnalysisService.class)).addProcessor(new PhoneticAnalysisBinderProcessor()))
.createChildInjector(parentInjector);
AnalysisService analysisService = injector.getInstance(AnalysisService.class);
// need to create one with encoder...
//TokenFilterFactory tokenFilterFactory = analysisService.tokenFilter("phonetic");
//MatcherAssert.assertThat(tokenFilterFactory, Matchers.instanceOf(PhoneticTokenFilterFactory.class));
}
}
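
A hedged sketch (not part of this commit) of how the missing assertion could be completed: give the filter an `encoder` in the index settings, then check that the analysis service binds it to `PhoneticTokenFilterFactory`. Written as a separate TestNG class so its imports are explicit; the filter name `myfilter` is made up:

```java
package org.elasticsearch.index.analysis;

import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNameModule;
import org.elasticsearch.index.settings.IndexSettingsModule;
import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
import org.elasticsearch.indices.analysis.IndicesAnalysisService;
import org.hamcrest.MatcherAssert;
import org.hamcrest.Matchers;
import org.testng.annotations.Test;

public class PhoneticTokenFilterBindingSketchTests {

    @Test
    public void testPhoneticFilterIsBoundWithEncoder() {
        // The "encoder" key is required by PhoneticTokenFilterFactory.
        Settings settings = ImmutableSettings.settingsBuilder()
                .put("index.analysis.filter.myfilter.type", "phonetic")
                .put("index.analysis.filter.myfilter.encoder", "metaphone")
                .build();
        Index index = new Index("test");
        Injector parentInjector = new ModulesBuilder().add(
                new SettingsModule(settings),
                new EnvironmentModule(new Environment(settings)),
                new IndicesAnalysisModule()).createInjector();
        Injector injector = new ModulesBuilder().add(
                new IndexSettingsModule(index, settings),
                new IndexNameModule(index),
                new AnalysisModule(settings, parentInjector.getInstance(IndicesAnalysisService.class))
                        .addProcessor(new PhoneticAnalysisBinderProcessor()))
                .createChildInjector(parentInjector);
        AnalysisService analysisService = injector.getInstance(AnalysisService.class);
        TokenFilterFactory filterFactory = analysisService.tokenFilter("myfilter");
        MatcherAssert.assertThat(filterFactory, Matchers.instanceOf(PhoneticTokenFilterFactory.class));
    }
}
```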

5
src/test/resources/log4j.properties Normal file

@@ -0,0 +1,5 @@
log4j.rootLogger=INFO, out
log4j.appender.out=org.apache.log4j.ConsoleAppender
log4j.appender.out.layout=org.apache.log4j.PatternLayout
log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n