mirror of https://github.com/apache/lucene.git

LUCENE-3907: Fix EdgeNGramTokenizer and EdgeNGramTokenFilter to not generate corrupt token stream graphs.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1479891 13f79535-47bb-0310-9956-ffa450edef68

This commit is contained in:
parent 600c10c1bc
commit 582d0f103b
CHANGES.txt
@@ -69,6 +69,13 @@ Changes in backwards compatibility policy
   suggesters, you now need to call setPreservePositionIncrements(false) instead
   of configuring the token filters to not increment positions. (Adrien Grand)
 
+* LUCENE-3907: EdgeNGramTokenizer now supports maxGramSize > 1024, doesn't trim
+  the input, sets position increment = 1 for all tokens and doesn't support
+  backward grams anymore. (Adrien Grand)
+
+* LUCENE-3907: EdgeNGramTokenFilter does not support backward grams and does
+  not update offsets anymore. (Adrien Grand)
+
 Bug Fixes
 
 * LUCENE-4935: CustomScoreQuery wrongly applied its query boost twice
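Note: since backward grams are gone from both classes, a caller that needs the old Side.BACK output can reverse around a front-gram filter instead. A minimal sketch, assuming the Lucene 4.4 APIs touched in this commit (`reader` is a placeholder); the classification test at the bottom of this diff makes exactly this change:

// Emulate the removed Side.BACK: reverse the token, take FRONT grams,
// then reverse each gram back so its characters read in original order.
static TokenStream backGrams(Reader reader) {
  Tokenizer tokenizer = new KeywordTokenizer(reader);
  return new ReverseStringFilter(Version.LUCENE_44,
      new EdgeNGramTokenFilter(Version.LUCENE_44,
          new ReverseStringFilter(Version.LUCENE_44, tokenizer), 10, 20));
}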
EdgeNGramFilterFactory.java
@@ -49,6 +49,6 @@ public class EdgeNGramFilterFactory extends TokenFilterFactory {
 
   @Override
   public EdgeNGramTokenFilter create(TokenStream input) {
-    return new EdgeNGramTokenFilter(input, side, minGramSize, maxGramSize);
+    return new EdgeNGramTokenFilter(luceneMatchVersion, input, side, minGramSize, maxGramSize);
   }
 }
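Both factories now pass luceneMatchVersion through to the n-gram classes, so "side"="back" only keeps working under a pre-4.4 match version. A hedged sketch, using the version-aware test helpers added to BaseTokenStreamFactoryTestCase later in this diff:

// Grounded in TestNGramFilters below: back grams through the factory now
// require an explicit pre-4.4 match version such as Version.LUCENE_43.
TokenStream stream = new MockTokenizer(new StringReader("ready"), MockTokenizer.WHITESPACE, false);
stream = tokenFilterFactory("EdgeNGram", Version.LUCENE_43, "side", "back").create(stream);
assertTokenStreamContents(stream, new String[] { "y" });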
EdgeNGramTokenFilter.java
@@ -19,9 +19,11 @@ package org.apache.lucene.analysis.ngram;
 
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.util.Version;
 
 import java.io.IOException;
 
@@ -29,7 +31,9 @@ import java.io.IOException;
  * Tokenizes the given token into n-grams of given size(s).
  * <p>
  * This {@link TokenFilter} create n-grams from the beginning edge or ending edge of a input token.
  * </p>
+ * <p><a name="version"/>As of Lucene 4.4, this filter does not support
+ * {@link Side#BACK} (you can use {@link ReverseStringFilter} up-front and
+ * afterward to get the same behavior) and does not update offsets anymore.
  */
 public final class EdgeNGramTokenFilter extends TokenFilter {
   public static final Side DEFAULT_SIDE = Side.FRONT;
@@ -46,6 +50,7 @@ public final class EdgeNGramTokenFilter extends TokenFilter {
   },
 
   /** Get the n-gram from the end of the input */
+  @Deprecated
   BACK {
     @Override
     public String getLabel() { return "back"; }
@@ -65,6 +70,7 @@ public final class EdgeNGramTokenFilter extends TokenFilter {
     }
   }
 
+  private final Version version;
   private final int minGram;
   private final int maxGram;
   private Side side;
@@ -73,7 +79,7 @@ public final class EdgeNGramTokenFilter extends TokenFilter {
   private int curGramSize;
   private int tokStart;
   private int tokEnd; // only used if the length changed before this filter
-  private boolean hasIllegalOffsets; // only if the length changed before this filter
+  private boolean updateOffsets; // never if the length changed before this filter
   private int savePosIncr;
   private boolean isFirstToken = true;
 
@@ -84,14 +90,24 @@ public final class EdgeNGramTokenFilter extends TokenFilter {
   /**
    * Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
    *
+   * @param version the <a href="#version">Lucene match version</a>
    * @param input {@link TokenStream} holding the input to be tokenized
    * @param side the {@link Side} from which to chop off an n-gram
    * @param minGram the smallest n-gram to generate
    * @param maxGram the largest n-gram to generate
    */
-  public EdgeNGramTokenFilter(TokenStream input, Side side, int minGram, int maxGram) {
+  @Deprecated
+  public EdgeNGramTokenFilter(Version version, TokenStream input, Side side, int minGram, int maxGram) {
     super(input);
 
+    if (version == null) {
+      throw new IllegalArgumentException("version must not be null");
+    }
+
+    if (version.onOrAfter(Version.LUCENE_44) && side == Side.BACK) {
+      throw new IllegalArgumentException("Side.BACK is not supported anymore as of Lucene 4.4, use ReverseStringFilter up-front and afterward");
+    }
+
     if (side == null) {
       throw new IllegalArgumentException("sideLabel must be either front or back");
     }
@@ -104,6 +120,7 @@ public final class EdgeNGramTokenFilter extends TokenFilter {
       throw new IllegalArgumentException("minGram must not be greater than maxGram");
     }
 
+    this.version = version;
     this.minGram = minGram;
     this.maxGram = maxGram;
     this.side = side;
@@ -112,13 +129,27 @@ public final class EdgeNGramTokenFilter extends TokenFilter {
   /**
    * Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
    *
+   * @param version the <a href="#version">Lucene match version</a>
    * @param input {@link TokenStream} holding the input to be tokenized
    * @param sideLabel the name of the {@link Side} from which to chop off an n-gram
    * @param minGram the smallest n-gram to generate
    * @param maxGram the largest n-gram to generate
    */
-  public EdgeNGramTokenFilter(TokenStream input, String sideLabel, int minGram, int maxGram) {
-    this(input, Side.getSide(sideLabel), minGram, maxGram);
+  @Deprecated
+  public EdgeNGramTokenFilter(Version version, TokenStream input, String sideLabel, int minGram, int maxGram) {
+    this(version, input, Side.getSide(sideLabel), minGram, maxGram);
   }
 
+  /**
+   * Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
+   *
+   * @param version the <a href="#version">Lucene match version</a>
+   * @param input {@link TokenStream} holding the input to be tokenized
+   * @param minGram the smallest n-gram to generate
+   * @param maxGram the largest n-gram to generate
+   */
+  public EdgeNGramTokenFilter(Version version, TokenStream input, int minGram, int maxGram) {
+    this(version, input, Side.FRONT, minGram, maxGram);
+  }
+
   @Override
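Only the Side-taking constructors are deprecated; the new version-first constructor always takes grams from the front edge. A minimal usage sketch (the input token "abcde" is assumed):

TokenStream ts = new MockTokenizer(new StringReader("abcde"), MockTokenizer.WHITESPACE, false);
TokenStream grams = new EdgeNGramTokenFilter(Version.LUCENE_44, ts, 2, 3);
// front grams "ab" and "abc"; passing Side.BACK through the deprecated
// constructor throws IllegalArgumentException for LUCENE_44 and later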
@@ -133,9 +164,14 @@ public final class EdgeNGramTokenFilter extends TokenFilter {
         curGramSize = minGram;
         tokStart = offsetAtt.startOffset();
         tokEnd = offsetAtt.endOffset();
-        // if length by start + end offsets doesn't match the term text then assume
-        // this is a synonym and don't adjust the offsets.
-        hasIllegalOffsets = (tokStart + curTermLength) != tokEnd;
+        if (version.onOrAfter(Version.LUCENE_44)) {
+          // Never update offsets
+          updateOffsets = false;
+        } else {
+          // if length by start + end offsets doesn't match the term text then assume
+          // this is a synonym and don't adjust the offsets.
+          updateOffsets = (tokStart + curTermLength) == tokEnd;
+        }
         savePosIncr = posIncrAtt.getPositionIncrement();
       }
     }
@@ -145,10 +181,10 @@ public final class EdgeNGramTokenFilter extends TokenFilter {
         int start = side == Side.FRONT ? 0 : curTermLength - curGramSize;
         int end = start + curGramSize;
         clearAttributes();
-        if (hasIllegalOffsets) {
-          offsetAtt.setOffset(tokStart, tokEnd);
-        } else {
+        if (updateOffsets) {
           offsetAtt.setOffset(tokStart + start, tokStart + end);
+        } else {
+          offsetAtt.setOffset(tokStart, tokEnd);
         }
         // first ngram gets increment, others don't
         if (curGramSize == minGram) {
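The net effect of replacing hasIllegalOffsets with updateOffsets: under LUCENE_44 every gram reports the whole input token's offsets, so offsets never shrink or go backwards within a position, which is what corrupted downstream graph consumers. A sketch matching the updated testFrontRangeOfNgrams below (single input token "abcde", offsets 0..5):

TokenStream ts = new MockTokenizer(new StringReader("abcde"), MockTokenizer.WHITESPACE, false);
TokenStream grams = new EdgeNGramTokenFilter(Version.LUCENE_44, ts, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
// 4.4 emits "a"(0,5), "ab"(0,5), "abc"(0,5) instead of "a"(0,1), "ab"(0,2), "abc"(0,3)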
EdgeNGramTokenizer.java
@@ -24,20 +24,26 @@ import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Version;
 
 /**
  * Tokenizes the input from an edge into n-grams of given size(s).
  * <p>
  * This {@link Tokenizer} create n-grams from the beginning edge or ending edge of a input token.
- * MaxGram can't be larger than 1024 because of limitation.
  * </p>
+ * <p><a name="version" /> As of Lucene 4.4, this tokenizer<ul>
+ * <li>can handle <code>maxGram</code> larger than 1024 chars, but beware that this will result in increased memory usage
+ * <li>doesn't trim the input,
+ * <li>sets position increments equal to 1 instead of 1 for the first token and 0 for all other ones
+ * <li>doesn't support {@link Side#BACK} anymore.
+ * </ul>
  */
 public final class EdgeNGramTokenizer extends Tokenizer {
   public static final Side DEFAULT_SIDE = Side.FRONT;
   public static final int DEFAULT_MAX_GRAM_SIZE = 1;
   public static final int DEFAULT_MIN_GRAM_SIZE = 1;
 
+  private Version version;
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
   private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
@@ -52,6 +58,7 @@ public final class EdgeNGramTokenizer extends Tokenizer {
   },
 
   /** Get the n-gram from the end of the input */
+  @Deprecated
   BACK {
     @Override
     public String getLabel() { return "back"; }
@@ -84,56 +91,95 @@ public final class EdgeNGramTokenizer extends Tokenizer {
   /**
    * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
    *
+   * @param version the <a href="#version">Lucene match version</a>
    * @param input {@link Reader} holding the input to be tokenized
    * @param side the {@link Side} from which to chop off an n-gram
    * @param minGram the smallest n-gram to generate
    * @param maxGram the largest n-gram to generate
    */
-  public EdgeNGramTokenizer(Reader input, Side side, int minGram, int maxGram) {
+  @Deprecated
+  public EdgeNGramTokenizer(Version version, Reader input, Side side, int minGram, int maxGram) {
     super(input);
-    init(side, minGram, maxGram);
+    init(version, side, minGram, maxGram);
   }
 
   /**
    * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
    *
+   * @param version the <a href="#version">Lucene match version</a>
    * @param factory {@link org.apache.lucene.util.AttributeSource.AttributeFactory} to use
    * @param input {@link Reader} holding the input to be tokenized
    * @param side the {@link Side} from which to chop off an n-gram
    * @param minGram the smallest n-gram to generate
    * @param maxGram the largest n-gram to generate
    */
-  public EdgeNGramTokenizer(AttributeFactory factory, Reader input, Side side, int minGram, int maxGram) {
+  @Deprecated
+  public EdgeNGramTokenizer(Version version, AttributeFactory factory, Reader input, Side side, int minGram, int maxGram) {
     super(factory, input);
-    init(side, minGram, maxGram);
+    init(version, side, minGram, maxGram);
   }
 
   /**
    * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
    *
+   * @param version the <a href="#version">Lucene match version</a>
    * @param input {@link Reader} holding the input to be tokenized
    * @param sideLabel the name of the {@link Side} from which to chop off an n-gram
    * @param minGram the smallest n-gram to generate
    * @param maxGram the largest n-gram to generate
    */
-  public EdgeNGramTokenizer(Reader input, String sideLabel, int minGram, int maxGram) {
-    this(input, Side.getSide(sideLabel), minGram, maxGram);
+  @Deprecated
+  public EdgeNGramTokenizer(Version version, Reader input, String sideLabel, int minGram, int maxGram) {
+    this(version, input, Side.getSide(sideLabel), minGram, maxGram);
   }
 
   /**
    * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
    *
    *
+   * @param version the <a href="#version">Lucene match version</a>
    * @param factory {@link org.apache.lucene.util.AttributeSource.AttributeFactory} to use
    * @param input {@link Reader} holding the input to be tokenized
    * @param sideLabel the name of the {@link Side} from which to chop off an n-gram
    * @param minGram the smallest n-gram to generate
    * @param maxGram the largest n-gram to generate
    */
-  public EdgeNGramTokenizer(AttributeFactory factory, Reader input, String sideLabel, int minGram, int maxGram) {
-    this(factory, input, Side.getSide(sideLabel), minGram, maxGram);
+  @Deprecated
+  public EdgeNGramTokenizer(Version version, AttributeFactory factory, Reader input, String sideLabel, int minGram, int maxGram) {
+    this(version, factory, input, Side.getSide(sideLabel), minGram, maxGram);
   }
 
-  private void init(Side side, int minGram, int maxGram) {
+  /**
+   * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
+   *
+   * @param version the <a href="#version">Lucene match version</a>
+   * @param input {@link Reader} holding the input to be tokenized
+   * @param minGram the smallest n-gram to generate
+   * @param maxGram the largest n-gram to generate
+   */
+  @Deprecated
+  public EdgeNGramTokenizer(Version version, Reader input, int minGram, int maxGram) {
+    this(version, input, Side.FRONT, minGram, maxGram);
+  }
+
+  /**
+   * Creates EdgeNGramTokenizer that can generate n-grams in the sizes of the given range
+   *
+   * @param version the <a href="#version">Lucene match version</a>
+   * @param factory {@link org.apache.lucene.util.AttributeSource.AttributeFactory} to use
+   * @param input {@link Reader} holding the input to be tokenized
+   * @param minGram the smallest n-gram to generate
+   * @param maxGram the largest n-gram to generate
+   */
+  @Deprecated
+  public EdgeNGramTokenizer(Version version, AttributeFactory factory, Reader input, int minGram, int maxGram) {
+    this(version, factory, input, Side.FRONT, minGram, maxGram);
+  }
+
+  private void init(Version version, Side side, int minGram, int maxGram) {
+    if (version == null) {
+      throw new IllegalArgumentException("version must not be null");
+    }
+
     if (side == null) {
       throw new IllegalArgumentException("sideLabel must be either front or back");
     }
@@ -146,6 +192,15 @@ public final class EdgeNGramTokenizer extends Tokenizer {
       throw new IllegalArgumentException("minGram must not be greater than maxGram");
     }
 
+    if (version.onOrAfter(Version.LUCENE_44)) {
+      if (side == Side.BACK) {
+        throw new IllegalArgumentException("Side.BACK is not supported anymore as of Lucene 4.4");
+      }
+    } else {
+      maxGram = Math.min(maxGram, 1024);
+    }
+
+    this.version = version;
     this.minGram = minGram;
     this.maxGram = maxGram;
     this.side = side;
@@ -159,20 +214,29 @@ public final class EdgeNGramTokenizer extends Tokenizer {
     if (!started) {
       started = true;
       gramSize = minGram;
-      char[] chars = new char[1024];
+      final int limit = side == Side.FRONT ? maxGram : 1024;
+      char[] chars = new char[Math.min(1024, limit)];
       charsRead = 0;
       // TODO: refactor to a shared readFully somewhere:
-      while (charsRead < chars.length) {
+      boolean exhausted = false;
+      while (charsRead < limit) {
         final int inc = input.read(chars, charsRead, chars.length-charsRead);
         if (inc == -1) {
+          exhausted = true;
           break;
         }
         charsRead += inc;
+        if (charsRead == chars.length && charsRead < limit) {
+          chars = ArrayUtil.grow(chars);
+        }
       }
 
-      inStr = new String(chars, 0, charsRead).trim(); // remove any trailing empty strings
+      inStr = new String(chars, 0, charsRead);
+      if (!version.onOrAfter(Version.LUCENE_44)) {
+        inStr = inStr.trim();
+      }
 
-      if (charsRead == chars.length) {
+      if (!exhausted) {
         // Read extra throwaway chars so that on end() we
         // report the correct offset:
         char[] throwaway = new char[1024];
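The fixed 1024-char buffer was the reason for the old maxGram cap; the loop now grows it with ArrayUtil.grow until the reader is exhausted or `limit` chars are buffered, which is what makes grams longer than 1024 possible (testLargeInput below exercises this). A hedged sketch, where `bigText` is an assumed string longer than 1024 chars:

EdgeNGramTokenizer tk = new EdgeNGramTokenizer(Version.LUCENE_44,
    new StringReader(bigText), EdgeNGramTokenizer.Side.FRONT, 1, 5 * 1024);
// grams keep growing past 1024 chars, at the cost of buffering up to maxGram chars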
@@ -190,6 +254,8 @@ public final class EdgeNGramTokenizer extends Tokenizer {
         return false;
       }
       posIncrAtt.setPositionIncrement(1);
+    } else if (version.onOrAfter(Version.LUCENE_44)) {
+      posIncrAtt.setPositionIncrement(1);
     } else {
       posIncrAtt.setPositionIncrement(0);
     }
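This is the core graph fix in the tokenizer: previously only the first gram advanced the position and every later gram stacked at increment 0; as of 4.4 each gram advances by 1. The new testTokenizerPositions below pins both behaviors side by side:

// pre-4.4: increments 1, 0, 0 for "a", "ab", "abc"
Tokenizer old43 = new EdgeNGramTokenizer(Version.LUCENE_43,
    new StringReader("abcde"), EdgeNGramTokenizer.Side.FRONT, 1, 3);
// 4.4: increments 1, 1, 1 for the same grams
Tokenizer cur44 = new EdgeNGramTokenizer(Version.LUCENE_44,
    new StringReader("abcde"), EdgeNGramTokenizer.Side.FRONT, 1, 3);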
@@ -200,7 +266,7 @@ public final class EdgeNGramTokenizer extends Tokenizer {
     }
 
     // if we have hit the end of our n-gram size range, quit
-    if (gramSize > maxGram) {
+    if (gramSize > maxGram || gramSize > inLen) {
       return false;
     }
 
EdgeNGramTokenizerFactory.java
@@ -50,6 +50,6 @@ public class EdgeNGramTokenizerFactory extends TokenizerFactory {
 
   @Override
   public EdgeNGramTokenizer create(AttributeFactory factory, Reader input) {
-    return new EdgeNGramTokenizer(factory, input, side, minGramSize, maxGramSize);
+    return new EdgeNGramTokenizer(luceneMatchVersion, factory, input, side, minGramSize, maxGramSize);
   }
 }
TestRandomChains.java
@@ -153,17 +153,6 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
       // Not broken: we forcefully add this, so we shouldn't
       // also randomly pick it:
       ValidatingTokenFilter.class,
-      // NOTE: these by themselves won't cause any 'basic assertions' to fail.
-      // but see https://issues.apache.org/jira/browse/LUCENE-3920, if any
-      // tokenfilter that combines words (e.g. shingles) comes after them,
-      // this will create bogus offsets because their 'offsets go backwards',
-      // causing shingle or whatever to make a single token with a
-      // startOffset thats > its endOffset
-      // (see LUCENE-3738 for a list of other offenders here)
-      // broken!
-      EdgeNGramTokenizer.class,
-      // broken!
-      EdgeNGramTokenFilter.class,
       // broken!
       WordDelimiterFilter.class)) {
     for (Constructor<?> ctor : c.getConstructors()) {
EdgeNGramTokenFilterTest.java
@@ -27,6 +27,7 @@ import org.apache.lucene.analysis.core.KeywordTokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
 import org.apache.lucene.analysis.position.PositionFilter;
+import org.apache.lucene.util.Version;
 
 import java.io.Reader;
 import java.io.StringReader;
@@ -47,7 +48,7 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
   public void testInvalidInput() throws Exception {
     boolean gotException = false;
     try {
-      new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 0, 0);
+      new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 0, 0);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -57,7 +58,7 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
   public void testInvalidInput2() throws Exception {
     boolean gotException = false;
     try {
-      new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 2, 1);
+      new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 2, 1);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -67,7 +68,7 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
   public void testInvalidInput3() throws Exception {
     boolean gotException = false;
     try {
-      new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, -1, 2);
+      new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, -1, 2);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -75,27 +76,27 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
   }
 
   public void testFrontUnigram() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 1, 1);
-    assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{1});
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 1, 1);
+    assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{5});
   }
 
   public void testBackUnigram() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.BACK, 1, 1);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(Version.LUCENE_43, input, EdgeNGramTokenFilter.Side.BACK, 1, 1);
     assertTokenStreamContents(tokenizer, new String[]{"e"}, new int[]{4}, new int[]{5});
   }
 
   public void testOversizedNgrams() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 6, 6);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 6, 6);
     assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0]);
   }
 
   public void testFrontRangeOfNgrams() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
-    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
+    assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{5,5,5});
   }
 
   public void testBackRangeOfNgrams() throws Exception {
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.BACK, 1, 3);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(Version.LUCENE_43, input, EdgeNGramTokenFilter.Side.BACK, 1, 3);
     assertTokenStreamContents(tokenizer,
                           new String[]{"e","de","cde"},
                           new int[]{4,3,2},
@@ -109,11 +110,11 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
 
   public void testFilterPositions() throws Exception {
     TokenStream ts = new MockTokenizer(new StringReader("abcde vwxyz"), MockTokenizer.WHITESPACE, false);
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(ts, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, ts, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
     assertTokenStreamContents(tokenizer,
                           new String[]{"a","ab","abc","v","vw","vwx"},
                           new int[]{0,0,0,6,6,6},
-                          new int[]{1,2,3,7,8,9},
+                          new int[]{5,5,5,11,11,11},
                           null,
                           new int[]{1,0,0,1,0,0},
                           null,
@@ -124,43 +125,30 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
   public void testFirstTokenPositionIncrement() throws Exception {
     TokenStream ts = new MockTokenizer(new StringReader("a abc"), MockTokenizer.WHITESPACE, false);
     ts = new PositionFilter(ts, 0); // All but first token will get 0 position increment
-    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(ts, EdgeNGramTokenFilter.Side.FRONT, 2, 3);
+    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, ts, EdgeNGramTokenFilter.Side.FRONT, 2, 3);
     // The first token "a" will not be output, since it's smaller than the mingram size of 2.
     // The second token on input to EdgeNGramTokenFilter will have position increment of 0,
     // which should be increased to 1, since this is the first output token in the stream.
     assertTokenStreamContents(filter,
        new String[] { "ab", "abc" },
        new int[] { 2, 2 },
-       new int[] { 4, 5 },
+       new int[] { 5, 5 },
        new int[] { 1, 0 }
     );
   }
 
-  public void testTokenizerPositions() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(new StringReader("abcde"), EdgeNGramTokenizer.Side.FRONT, 1, 3);
-    assertTokenStreamContents(tokenizer,
-                          new String[]{"a","ab","abc"},
-                          new int[]{0,0,0},
-                          new int[]{1,2,3},
-                          null,
-                          new int[]{1,0,0},
-                          null,
-                          null,
-                          false);
-  }
-
   public void testSmallTokenInStream() throws Exception {
     input = new MockTokenizer(new StringReader("abc de fgh"), MockTokenizer.WHITESPACE, false);
-    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 3, 3);
+    EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, input, EdgeNGramTokenFilter.Side.FRONT, 3, 3);
     assertTokenStreamContents(tokenizer, new String[]{"abc","fgh"}, new int[]{0,7}, new int[]{3,10});
   }
 
   public void testReset() throws Exception {
     WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
-    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
-    assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
+    EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
+    assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{5,5,5});
     tokenizer.setReader(new StringReader("abcde"));
-    assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
+    assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{5,5,5});
   }
 
   // LUCENE-3642
@@ -173,7 +161,7 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
         TokenFilter filters = new ASCIIFoldingFilter(tokenizer);
-        filters = new EdgeNGramTokenFilter(filters, EdgeNGramTokenFilter.Side.FRONT, 2, 15);
+        filters = new EdgeNGramTokenFilter(Version.LUCENE_43, filters, EdgeNGramTokenFilter.Side.FRONT, 2, 15);
         return new TokenStreamComponents(tokenizer, filters);
       }
     };
@@ -190,7 +178,7 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
         return new TokenStreamComponents(tokenizer,
-            new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 2, 4));
+            new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, EdgeNGramTokenFilter.Side.FRONT, 2, 4));
       }
     };
     checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
@@ -200,7 +188,7 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
         return new TokenStreamComponents(tokenizer,
-            new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.BACK, 2, 4));
+            new EdgeNGramTokenFilter(Version.LUCENE_43, tokenizer, EdgeNGramTokenFilter.Side.BACK, 2, 4));
       }
     };
     checkRandomData(random(), b, 1000*RANDOM_MULTIPLIER, 20, false, false);
@@ -213,7 +201,7 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
         Tokenizer tokenizer = new KeywordTokenizer(reader);
         return new TokenStreamComponents(tokenizer,
-            new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 2, 15));
+            new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, tokenizer, EdgeNGramTokenFilter.Side.FRONT, 2, 15));
       }
     };
     checkAnalysisConsistency(random, a, random.nextBoolean(), "");
@@ -223,7 +211,7 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = new KeywordTokenizer(reader);
        return new TokenStreamComponents(tokenizer,
-           new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.BACK, 2, 15));
+           new EdgeNGramTokenFilter(Version.LUCENE_43, tokenizer, EdgeNGramTokenFilter.Side.BACK, 2, 15));
      }
    };
    checkAnalysisConsistency(random, b, random.nextBoolean(), "");
EdgeNGramTokenizerTest.java
@@ -18,13 +18,18 @@ package org.apache.lucene.analysis.ngram;
  */
 
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.Analyzer.TokenStreamComponents;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.util.Version;
+import org.apache.lucene.util._TestUtil;
 
 /**
  * Tests {@link EdgeNGramTokenizer} for correctness.
@@ -41,7 +46,7 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
   public void testInvalidInput() throws Exception {
     boolean gotException = false;
     try {
-      new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 0, 0);
+      new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, EdgeNGramTokenizer.Side.FRONT, 0, 0);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -51,7 +56,7 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
   public void testInvalidInput2() throws Exception {
     boolean gotException = false;
     try {
-      new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 2, 1);
+      new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, EdgeNGramTokenizer.Side.FRONT, 2, 1);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -61,7 +66,7 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
   public void testInvalidInput3() throws Exception {
     boolean gotException = false;
     try {
-      new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, -1, 2);
+      new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, EdgeNGramTokenizer.Side.FRONT, -1, 2);
     } catch (IllegalArgumentException e) {
       gotException = true;
     }
@@ -69,32 +74,32 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
   }
 
   public void testFrontUnigram() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 1);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, EdgeNGramTokenizer.Side.FRONT, 1, 1);
     assertTokenStreamContents(tokenizer, new String[]{"a"}, new int[]{0}, new int[]{1}, 5 /* abcde */);
   }
 
   public void testBackUnigram() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.BACK, 1, 1);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(Version.LUCENE_43, input, EdgeNGramTokenizer.Side.BACK, 1, 1);
     assertTokenStreamContents(tokenizer, new String[]{"e"}, new int[]{4}, new int[]{5}, 5 /* abcde */);
   }
 
   public void testOversizedNgrams() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 6, 6);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, EdgeNGramTokenizer.Side.FRONT, 6, 6);
     assertTokenStreamContents(tokenizer, new String[0], new int[0], new int[0], 5 /* abcde */);
   }
 
   public void testFrontRangeOfNgrams() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 3);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, EdgeNGramTokenizer.Side.FRONT, 1, 3);
     assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
   }
 
   public void testBackRangeOfNgrams() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.BACK, 1, 3);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(Version.LUCENE_43, input, EdgeNGramTokenizer.Side.BACK, 1, 3);
     assertTokenStreamContents(tokenizer, new String[]{"e","de","cde"}, new int[]{4,3,2}, new int[]{5,5,5}, null, null, null, 5 /* abcde */, false);
   }
 
   public void testReset() throws Exception {
-    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(input, EdgeNGramTokenizer.Side.FRONT, 1, 3);
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, input, EdgeNGramTokenizer.Side.FRONT, 1, 3);
     assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
     tokenizer.setReader(new StringReader("abcde"));
     assertTokenStreamContents(tokenizer, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3}, 5 /* abcde */);
@@ -105,7 +110,7 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
     Analyzer a = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        Tokenizer tokenizer = new EdgeNGramTokenizer(reader, EdgeNGramTokenizer.Side.FRONT, 2, 4);
+        Tokenizer tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, reader, EdgeNGramTokenizer.Side.FRONT, 2, 4);
         return new TokenStreamComponents(tokenizer, tokenizer);
       }
     };
@@ -115,11 +120,57 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
     Analyzer b = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-        Tokenizer tokenizer = new EdgeNGramTokenizer(reader, EdgeNGramTokenizer.Side.BACK, 2, 4);
+        Tokenizer tokenizer = new EdgeNGramTokenizer(Version.LUCENE_43, reader, EdgeNGramTokenizer.Side.BACK, 2, 4);
         return new TokenStreamComponents(tokenizer, tokenizer);
       }
     };
-    checkRandomData(random(), b, 1000*RANDOM_MULTIPLIER, 20, false, false);
+    checkRandomData(random(), b, 100*RANDOM_MULTIPLIER, 8192, false, false);
   }
 
+  public void testTokenizerPositions() throws Exception {
+    EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(Version.LUCENE_43, new StringReader("abcde"), EdgeNGramTokenizer.Side.FRONT, 1, 3);
+    assertTokenStreamContents(tokenizer,
+                          new String[]{"a","ab","abc"},
+                          new int[]{0,0,0},
+                          new int[]{1,2,3},
+                          null,
+                          new int[]{1,0,0},
+                          null,
+                          null,
+                          false);
+
+    tokenizer = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"), EdgeNGramTokenizer.Side.FRONT, 1, 3);
+    assertTokenStreamContents(tokenizer,
+                          new String[]{"a","ab","abc"},
+                          new int[]{0,0,0},
+                          new int[]{1,2,3},
+                          null,
+                          new int[]{1,1,1},
+                          null,
+                          null,
+                          false);
+  }
+
+  public void testLargeInput() throws IOException {
+    final String input = _TestUtil.randomSimpleString(random(), 1024 * 5);
+    final int minGram = _TestUtil.nextInt(random(), 1, 1024);
+    final int maxGram = _TestUtil.nextInt(random(), minGram, 5 * 1024);
+    EdgeNGramTokenizer tk = new EdgeNGramTokenizer(TEST_VERSION_CURRENT, new StringReader(input), EdgeNGramTokenizer.Side.FRONT, minGram, maxGram);
+    final CharTermAttribute charTermAtt = tk.addAttribute(CharTermAttribute.class);
+    final OffsetAttribute offsetAtt = tk.addAttribute(OffsetAttribute.class);
+    final PositionIncrementAttribute posIncAtt = tk.addAttribute(PositionIncrementAttribute.class);
+    tk.reset();
+    for (int i = minGram; i <= maxGram && i <= input.length(); ++i) {
+      assertTrue(tk.incrementToken());
+      assertEquals(0, offsetAtt.startOffset());
+      assertEquals(i, offsetAtt.endOffset());
+      assertEquals(1, posIncAtt.getPositionIncrement());
+      assertEquals(input.substring(0, i), charTermAtt.toString());
+    }
+    assertFalse(tk.incrementToken());
+    tk.end();
+    assertEquals(input.length(), offsetAtt.startOffset());
+  }
+
 }
TestNGramFilters.java
@@ -23,6 +23,7 @@ import java.io.StringReader;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
+import org.apache.lucene.util.Version;
 
 /**
  * Simple tests to ensure the NGram filter factories are working.
@@ -101,7 +102,7 @@ public class TestNGramFilters extends BaseTokenStreamFactoryTestCase {
   */
  public void testEdgeNGramTokenizer3() throws Exception {
    Reader reader = new StringReader("ready");
-   TokenStream stream = tokenizerFactory("EdgeNGram",
+   TokenStream stream = tokenizerFactory("EdgeNGram", Version.LUCENE_43,
        "side", "back").create(reader);
    assertTokenStreamContents(stream,
        new String[] { "y" });
@@ -137,7 +138,7 @@ public class TestNGramFilters extends BaseTokenStreamFactoryTestCase {
   public void testEdgeNGramFilter3() throws Exception {
     Reader reader = new StringReader("ready");
     TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
-    stream = tokenFilterFactory("EdgeNGram",
+    stream = tokenFilterFactory("EdgeNGram", Version.LUCENE_43,
         "side", "back").create(stream);
     assertTokenStreamContents(stream,
         new String[] { "y" });
BaseTokenStreamFactoryTestCase.java
@@ -72,14 +72,23 @@ public abstract class BaseTokenStreamFactoryTestCase extends BaseTokenStreamTest
     }
     return factory;
   }
 
   /**
    * Returns a fully initialized TokenizerFactory with the specified name and key-value arguments.
    * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should
    * be on the test classpath.
    */
   protected TokenizerFactory tokenizerFactory(String name, String... keysAndValues) throws Exception {
-    return tokenizerFactory(name, TEST_VERSION_CURRENT, new ClasspathResourceLoader(getClass()), keysAndValues);
+    return tokenizerFactory(name, TEST_VERSION_CURRENT, keysAndValues);
   }
 
+  /**
+   * Returns a fully initialized TokenizerFactory with the specified name and key-value arguments.
+   * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should
+   * be on the test classpath.
+   */
+  protected TokenizerFactory tokenizerFactory(String name, Version version, String... keysAndValues) throws Exception {
+    return tokenizerFactory(name, version, new ClasspathResourceLoader(getClass()), keysAndValues);
+  }
+
   /**
@@ -89,14 +98,23 @@ public abstract class BaseTokenStreamFactoryTestCase extends BaseTokenStreamTest
   protected TokenizerFactory tokenizerFactory(String name, Version matchVersion, ResourceLoader loader, String... keysAndValues) throws Exception {
     return (TokenizerFactory) analysisFactory(TokenizerFactory.lookupClass(name), matchVersion, loader, keysAndValues);
   }
 
+  /**
+   * Returns a fully initialized TokenFilterFactory with the specified name and key-value arguments.
+   * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should
+   * be on the test classpath.
+   */
+  protected TokenFilterFactory tokenFilterFactory(String name, Version version, String... keysAndValues) throws Exception {
+    return tokenFilterFactory(name, version, new ClasspathResourceLoader(getClass()), keysAndValues);
+  }
+
   /**
    * Returns a fully initialized TokenFilterFactory with the specified name and key-value arguments.
    * {@link ClasspathResourceLoader} is used for loading resources, so any required ones should
    * be on the test classpath.
    */
   protected TokenFilterFactory tokenFilterFactory(String name, String... keysAndValues) throws Exception {
-    return tokenFilterFactory(name, TEST_VERSION_CURRENT, new ClasspathResourceLoader(getClass()), keysAndValues);
+    return tokenFilterFactory(name, TEST_VERSION_CURRENT, keysAndValues);
   }
 
   /**
SimpleNaiveBayesClassifierTest.java
@@ -18,9 +18,14 @@ package org.apache.lucene.classification;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.core.KeywordTokenizer;
+import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
+import org.apache.lucene.analysis.reverse.ReverseStringFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 import org.junit.Test;
 
 import java.io.Reader;
@@ -46,8 +51,8 @@ public class SimpleNaiveBayesClassifierTest extends ClassificationTestBase<Bytes
   private class NGramAnalyzer extends Analyzer {
     @Override
     protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-      return new TokenStreamComponents(new EdgeNGramTokenizer(reader, EdgeNGramTokenizer.Side.BACK,
-          10, 20));
+      final Tokenizer tokenizer = new KeywordTokenizer(reader);
+      return new TokenStreamComponents(tokenizer, new ReverseStringFilter(TEST_VERSION_CURRENT, new EdgeNGramTokenFilter(TEST_VERSION_CURRENT, new ReverseStringFilter(TEST_VERSION_CURRENT, tokenizer), 10, 20)));
     }
   }
 