LUCENE-2413: consolidate PorterStemmer to contrib/analyzers

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@940459 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2010-05-03 13:54:44 +00:00
parent 722e52db36
commit 1d14f8f18f
16 changed files with 49 additions and 479 deletions

View File

@@ -4,6 +4,10 @@ Lucene Change Log
Changes in backwards compatibility policy
+* LUCENE-2413: Consolidated all Lucene analyzers into contrib/analyzers.
+- o.a.l.analysis.PorterStemFilter -> o.a.l.analysis.en.PorterStemFilter
+... (in progress)
* LUCENE-1458, LUCENE-2111, LUCENE-2354: Changes from flexible indexing:
- On upgrading to 3.1, if you do not fully reindex your documents,
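Migration note for the LUCENE-2413 entry above: for user code the move is a one-line import change; the filter's behavior is unchanged. A minimal sketch against the relocated class (the example class and tokenizer choice are illustrative, not part of this commit):

import java.io.StringReader;
import org.apache.lucene.analysis.KeywordTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
// before: import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.en.PorterStemFilter;

public class PorterMigrationExample {
  public static void main(String[] args) throws Exception {
    // Porter stemming expects lower-cased input; "fishing" stems to "fish"
    TokenStream ts = new PorterStemFilter(new KeywordTokenizer(new StringReader("fishing")));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    while (ts.incrementToken()) {
      System.out.println(term.toString()); // prints "fish"
    }
    ts.close();
  }
}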

View File

@@ -23,6 +23,11 @@ Changes in backwards compatibility policy
* LUCENE-2323: Moved contrib/wikipedia functionality into contrib/analyzers.
Additionally the package was changed from org.apache.lucene.wikipedia.analysis
to org.apache.lucene.analysis.wikipedia. (Robert Muir)
+* LUCENE-2413: Consolidated all analyzers into contrib/analyzers.
+- contrib/analyzers/smartcn now depends on contrib/analyzers/common
+- The "AnalyzerUtil" in wordnet was removed.
+... (in progress)
Changes in runtime behavior

View File

@@ -24,7 +24,6 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerFilter;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;

View File

@@ -1,4 +1,4 @@
-package org.apache.lucene.analysis;
+package org.apache.lucene.analysis.en;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -19,6 +19,8 @@ package org.apache.lucene.analysis;
import java.io.IOException;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
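Note: PorterStemFilter honors KeywordAttribute (imported above): tokens flagged as keywords pass through unstemmed. A hedged sketch of protecting a term with KeywordMarkerFilter (example class, set contents, and Version constant are illustrative; constructor signatures assume the trunk API at this commit):

import java.io.StringReader;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class KeywordStemmingExample {
  public static void main(String[] args) throws Exception {
    // terms in this set keep their original form; everything else is stemmed
    CharArraySet keywords = new CharArraySet(Version.LUCENE_31, 1, true);
    keywords.add("lucene");
    TokenStream ts = new PorterStemFilter(
        new KeywordMarkerFilter(
            new WhitespaceTokenizer(Version.LUCENE_31, new StringReader("lucene fishing")),
            keywords));
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    while (ts.incrementToken()) {
      System.out.println(term.toString()); // prints "lucene" then "fish"
    }
    ts.close();
  }
}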

View File

@@ -1,4 +1,4 @@
-package org.apache.lucene.analysis;
+package org.apache.lucene.analysis.en;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more

View File

@@ -1,4 +1,4 @@
-package org.apache.lucene.analysis;
+package org.apache.lucene.analysis.en;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -24,6 +24,14 @@ import java.io.InputStreamReader;
import java.io.StringReader;
import java.util.zip.ZipFile;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.CharArraySet;
+import org.apache.lucene.analysis.KeywordMarkerFilter;
+import org.apache.lucene.analysis.KeywordTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.WhitespaceTokenizer;
/**
* Test the PorterStemFilter with Martin Porter's test data.
*/

View File

@@ -7,7 +7,7 @@ import java.util.Map;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.KeywordTokenizer;
-import org.apache.lucene.analysis.PorterStemFilter;
+import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;

View File

@@ -28,11 +28,27 @@
<property name="maven.dist.dir" location="../../../dist/maven" />
<import file="../../contrib-build.xml"/>
+<contrib-uptodate name="analyzers/common" jarfile="${common.dir}/build/contrib/analyzers/common/lucene-analyzers-${version}.jar"
+property="analyzers-common.uptodate" classpath.property="analyzers-common.jar"/>
+<path id="classpath">
+<pathelement path="${analyzers-common.jar}"/>
+<path refid="base.classpath"/>
+</path>
<path id="test.classpath">
+<pathelement path="${analyzers-common.jar}"/>
<path refid="classpath"/>
<pathelement location="../../../build/classes/test/"/>
<path refid="junit-path"/>
<pathelement location="${build.dir}/classes/java"/>
</path>
+<target name="compile-core" depends="build-analyzers-common, common.compile-core" />
+<target name="build-analyzers-common" unless="analyzers-common.uptodate">
+<echo>Misc building dependency ${analyzers-common.jar}</echo>
+<ant antfile="../common/build.xml" target="default" inheritall="false" dir="../common" />
+</target>
</project>

View File

@@ -25,7 +25,7 @@ import java.util.Collections;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.PorterStemFilter;
+import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;

View File

@@ -1,464 +0,0 @@
package org.apache.lucene.wordnet;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.regex.Pattern;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.AttributeSource;
/**
* Various fulltext analysis utilities avoiding redundant code in several
* classes.
*
*/
public class AnalyzerUtil {
private AnalyzerUtil() {}
/**
* Returns a simple analyzer wrapper that logs all tokens produced by the
* underlying child analyzer to the given log stream (typically System.err);
* Otherwise behaves exactly like the child analyzer, delivering the very
* same tokens; useful for debugging purposes on custom indexing and/or
* querying.
*
* @param child
* the underlying child analyzer
* @param log
* the print stream to log to (typically System.err)
* @param logName
* a name for this logger (typically "log" or similar)
* @return a logging analyzer
*/
public static Analyzer getLoggingAnalyzer(final Analyzer child,
final PrintStream log, final String logName) {
if (child == null)
throw new IllegalArgumentException("child analyzer must not be null");
if (log == null)
throw new IllegalArgumentException("logStream must not be null");
return new Analyzer() {
@Override
public TokenStream tokenStream(final String fieldName, Reader reader) {
return new TokenFilter(child.tokenStream(fieldName, reader)) {
private int position = -1;
private TermAttribute termAtt = addAttribute(TermAttribute.class);
private PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
private OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
private TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
@Override
public boolean incrementToken() throws IOException {
boolean hasNext = input.incrementToken();
log.println(toString(hasNext));
return hasNext;
}
private String toString(boolean hasNext) {
if (!hasNext) return "[" + logName + ":EOS:" + fieldName + "]\n";
position += posIncrAtt.getPositionIncrement();
return "[" + logName + ":" + position + ":" + fieldName + ":"
+ termAtt.term() + ":" + offsetAtt.startOffset()
+ "-" + offsetAtt.endOffset() + ":" + typeAtt.type()
+ "]";
}
};
}
};
}
/**
* Returns an analyzer wrapper that returns at most the first
* <code>maxTokens</code> tokens from the underlying child analyzer,
* ignoring all remaining tokens.
*
* @param child
* the underlying child analyzer
* @param maxTokens
* the maximum number of tokens to return from the underlying
* analyzer (a value of Integer.MAX_VALUE indicates unlimited)
* @return an analyzer wrapper
*/
public static Analyzer getMaxTokenAnalyzer(
final Analyzer child, final int maxTokens) {
if (child == null)
throw new IllegalArgumentException("child analyzer must not be null");
if (maxTokens < 0)
throw new IllegalArgumentException("maxTokens must not be negative");
if (maxTokens == Integer.MAX_VALUE)
return child; // no need to wrap
return new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenFilter(child.tokenStream(fieldName, reader)) {
private int todo = maxTokens;
@Override
public boolean incrementToken() throws IOException {
return --todo >= 0 ? input.incrementToken() : false;
}
};
}
};
}
/**
* Returns an English stemming analyzer that stems tokens from the
* underlying child analyzer according to the Porter stemming algorithm. The
* child analyzer must deliver tokens in lower case for the stemmer to work
* properly.
* <p>
* Background: Stemming reduces token terms to their linguistic root form
* e.g. reduces "fishing" and "fishes" to "fish", "family" and "families" to
* "famili", as well as "complete" and "completion" to "complet". Note that
* the root form is not necessarily a meaningful word in itself, and that
* this is not a bug but rather a feature, if you lean back and think about
* fuzzy word matching for a bit.
* <p>
* See the Lucene contrib packages for stemmers (and stop words) for German,
* Russian and many more languages.
*
* @param child
* the underlying child analyzer
* @return an analyzer wrapper
*/
public static Analyzer getPorterStemmerAnalyzer(final Analyzer child) {
if (child == null)
throw new IllegalArgumentException("child analyzer must not be null");
return new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new PorterStemFilter(
child.tokenStream(fieldName, reader));
// /* PorterStemFilter and SnowballFilter have the same behaviour,
// but PorterStemFilter is much faster. */
// return new org.apache.lucene.analysis.snowball.SnowballFilter(
// child.tokenStream(fieldName, reader), "English");
}
};
}
/**
* Returns an analyzer wrapper that wraps the underlying child analyzer's
* token stream into a {@link SynonymTokenFilter}.
*
* @param child
* the underlying child analyzer
* @param synonyms
* the map used to extract synonyms for terms
* @param maxSynonyms
* the maximum number of synonym tokens to return per underlying
* token word (a value of Integer.MAX_VALUE indicates unlimited)
* @return a new analyzer
*/
public static Analyzer getSynonymAnalyzer(final Analyzer child,
final SynonymMap synonyms, final int maxSynonyms) {
if (child == null)
throw new IllegalArgumentException("child analyzer must not be null");
if (synonyms == null)
throw new IllegalArgumentException("synonyms must not be null");
if (maxSynonyms < 0)
throw new IllegalArgumentException("maxSynonyms must not be negative");
if (maxSynonyms == 0)
return child; // no need to wrap
return new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
return new SynonymTokenFilter(
child.tokenStream(fieldName, reader), synonyms, maxSynonyms);
}
};
}
/**
* Returns an analyzer wrapper that caches all tokens generated by the underlying child analyzer's
* token streams, and delivers those cached tokens on subsequent calls to
* <code>tokenStream(String fieldName, Reader reader)</code>
* if the fieldName has been seen before, altogether ignoring the Reader parameter on cache lookup.
* <p>
* If Analyzer / TokenFilter chains are expensive in terms of I/O or CPU, such caching can
* help improve performance if the same document is added to multiple Lucene indexes,
* because the text analysis phase need not be performed more than once.
* <p>
* Caveats:
* <ul>
* <li>Caching the tokens of large Lucene documents can lead to out of memory exceptions.</li>
* <li>The Token instances delivered by the underlying child analyzer must be immutable.</li>
* <li>The same caching analyzer instance must not be used for more than one document
* because the cache is not keyed on the Reader parameter.</li>
* </ul>
*
* @param child
* the underlying child analyzer
* @return a new analyzer
*/
public static Analyzer getTokenCachingAnalyzer(final Analyzer child) {
if (child == null)
throw new IllegalArgumentException("child analyzer must not be null");
return new Analyzer() {
private final HashMap<String,ArrayList<AttributeSource.State>> cache = new HashMap<String,ArrayList<AttributeSource.State>>();
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
final ArrayList<AttributeSource.State> tokens = cache.get(fieldName);
if (tokens == null) { // not yet cached
final ArrayList<AttributeSource.State> tokens2 = new ArrayList<AttributeSource.State>();
TokenStream tokenStream = new TokenFilter(child.tokenStream(fieldName, reader)) {
@Override
public boolean incrementToken() throws IOException {
boolean hasNext = input.incrementToken();
if (hasNext) tokens2.add(captureState());
return hasNext;
}
};
cache.put(fieldName, tokens2);
return tokenStream;
} else { // already cached
return new TokenStream() {
private Iterator<AttributeSource.State> iter = tokens.iterator();
@Override
public boolean incrementToken() {
if (!iter.hasNext()) return false;
restoreState(iter.next());
return true;
}
};
}
}
};
}
/**
* Returns (frequency:term) pairs for the top N distinct terms (aka words),
* sorted descending by frequency (and ascending by term, if tied).
* <p>
* Example XQuery:
* <pre>
* declare namespace util = "java:org.apache.lucene.index.memory.AnalyzerUtil";
* declare namespace analyzer = "java:org.apache.lucene.index.memory.PatternAnalyzer";
*
* for $pair in util:get-most-frequent-terms(
* analyzer:EXTENDED_ANALYZER(), doc("samples/shakespeare/othello.xml"), 10)
* return &lt;word word="{substring-after($pair, ':')}" frequency="{substring-before($pair, ':')}"/>
* </pre>
*
* @param analyzer
* the analyzer to use for splitting text into terms (aka words)
* @param text
* the text to analyze
* @param limit
* the maximum number of pairs to return; zero indicates
* "as many as possible".
* @return an array of (frequency:term) pairs in the form of (freq0:term0,
* freq1:term1, ..., freqN:termN). Each pair is a single string
* separated by a ':' delimiter.
*/
public static String[] getMostFrequentTerms(Analyzer analyzer, String text, int limit) {
if (analyzer == null)
throw new IllegalArgumentException("analyzer must not be null");
if (text == null)
throw new IllegalArgumentException("text must not be null");
if (limit <= 0) limit = Integer.MAX_VALUE;
// compute frequencies of distinct terms
HashMap<String,MutableInteger> map = new HashMap<String,MutableInteger>();
TokenStream stream = analyzer.tokenStream("", new StringReader(text));
TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
try {
while (stream.incrementToken()) {
MutableInteger freq = map.get(termAtt.term());
if (freq == null) {
freq = new MutableInteger(1);
map.put(termAtt.term(), freq);
} else {
freq.setValue(freq.intValue() + 1);
}
}
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
try {
stream.close();
} catch (IOException e2) {
throw new RuntimeException(e2);
}
}
// sort by frequency, text
Map.Entry<String,MutableInteger>[] entries = new Map.Entry[map.size()];
map.entrySet().toArray(entries);
Arrays.sort(entries, new Comparator<Map.Entry<String,MutableInteger>>() {
public int compare(Map.Entry<String,MutableInteger> e1, Map.Entry<String,MutableInteger> e2) {
int f1 = e1.getValue().intValue();
int f2 = e2.getValue().intValue();
if (f2 - f1 != 0) return f2 - f1;
String s1 = e1.getKey();
String s2 = e2.getKey();
return s1.compareTo(s2);
}
});
// return top N entries
int size = Math.min(limit, entries.length);
String[] pairs = new String[size];
for (int i=0; i < size; i++) {
pairs[i] = entries[i].getValue() + ":" + entries[i].getKey();
}
return pairs;
}
private static final class MutableInteger {
private int value;
public MutableInteger(int value) { this.value = value; }
public int intValue() { return value; }
public void setValue(int value) { this.value = value; }
@Override
public String toString() { return String.valueOf(value); }
}
// TODO: could use a more general i18n approach ala http://icu.sourceforge.net/docs/papers/text_boundary_analysis_in_java/
/** (Line terminator followed by zero or more whitespace) two or more times */
private static final Pattern PARAGRAPHS = Pattern.compile("([\\r\\n\\u0085\\u2028\\u2029][ \\t\\x0B\\f]*){2,}");
/**
* Returns at most the first N paragraphs of the given text. Delimiting
* characters are excluded from the results. Each returned paragraph is
* whitespace-trimmed via String.trim(), potentially an empty string.
*
* @param text
* the text to tokenize into paragraphs
* @param limit
* the maximum number of paragraphs to return; zero indicates "as
* many as possible".
* @return the first N paragraphs
*/
public static String[] getParagraphs(String text, int limit) {
return tokenize(PARAGRAPHS, text, limit);
}
private static String[] tokenize(Pattern pattern, String text, int limit) {
String[] tokens = pattern.split(text, limit);
for (int i=tokens.length; --i >= 0; ) tokens[i] = tokens[i].trim();
return tokens;
}
// TODO: don't split on floating point numbers, e.g. 3.1415 (digit before or after '.')
/** Divides text into sentences; Includes inverted spanish exclamation and question mark */
// private static final Pattern SENTENCES = Pattern.compile("[!\\.\\?\\xA1\\xBF]+");
/**
* Returns at most the first N sentences of the given text. Delimiting
* characters are excluded from the results. Each returned sentence is
* whitespace-trimmed via String.trim(), potentially an empty string.
*
* @param text
* the text to tokenize into sentences
* @param limit
* the maximum number of sentences to return; zero indicates "as
* many as possible".
* @return the first N sentences
*/
public static String[] getSentences(String text, int limit) {
// return tokenize(SENTENCES, text, limit); // equivalent but slower
int len = text.length();
if (len == 0) return new String[] { text };
if (limit <= 0) limit = Integer.MAX_VALUE;
// average sentence length heuristic
String[] tokens = new String[Math.min(limit, 1 + len/40)];
int size = 0;
int i = 0;
while (i < len && size < limit) {
// scan to end of current sentence
int start = i;
while (i < len && !isSentenceSeparator(text.charAt(i))) i++;
if (size == tokens.length) { // grow array
String[] tmp = new String[tokens.length << 1];
System.arraycopy(tokens, 0, tmp, 0, size);
tokens = tmp;
}
// add sentence (potentially empty)
tokens[size++] = text.substring(start, i).trim();
// scan to beginning of next sentence
while (i < len && isSentenceSeparator(text.charAt(i))) i++;
}
if (size == tokens.length) return tokens;
String[] results = new String[size];
System.arraycopy(tokens, 0, results, 0, size);
return results;
}
private static boolean isSentenceSeparator(char c) {
// regex [!\\.\\?\\xA1\\xBF]
switch (c) {
case '!': return true;
case '.': return true;
case '?': return true;
case 0xA1: return true; // spanish inverted exclamation mark
case 0xBF: return true; // spanish inverted question mark
default: return false;
}
}
}
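Note: AnalyzerUtil goes away without a drop-in replacement; callers of getPorterStemmerAnalyzer can inline the wrapper against the relocated filter. A minimal sketch (hypothetical class name; as in the removed code, the wrapped analyzer must deliver lower-cased tokens):

import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.en.PorterStemFilter;

public final class PorterStemmingAnalyzer extends Analyzer {
  private final Analyzer child; // must deliver lower-cased tokens

  public PorterStemmingAnalyzer(Analyzer child) {
    if (child == null)
      throw new IllegalArgumentException("child analyzer must not be null");
    this.child = child;
  }

  @Override
  public TokenStream tokenStream(String fieldName, Reader reader) {
    return new PorterStemFilter(child.tokenStream(fieldName, reader));
  }
}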

View File

@@ -18,7 +18,7 @@
package org.apache.solr.analysis;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.PorterStemFilter;
+import org.apache.lucene.analysis.en.PorterStemFilter;
/**
* @version $Id$

View File

@@ -23,7 +23,7 @@ import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;
-import org.apache.lucene.analysis.PorterStemFilter;
+import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;

View File

@@ -23,7 +23,7 @@ import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;
-import org.apache.lucene.analysis.PorterStemFilter;
+import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;

View File

@@ -190,7 +190,7 @@ public class DocumentAnalysisRequestHandlerTest extends AnalysisRequestHandlerTe
assertNotNull("Expecting the 'StopFilter' to be applied on the query for the 'text' field", tokenList);
assertEquals("Query has only one token", 1, tokenList.size());
assertToken(tokenList.get(0), new TokenInfo("jumping", null, "<ALPHANUM>", 0, 7, 1, null, false));
-tokenList = (List<NamedList>) queryResult.get("org.apache.lucene.analysis.PorterStemFilter");
+tokenList = (List<NamedList>) queryResult.get("org.apache.lucene.analysis.en.PorterStemFilter");
assertNotNull("Expecting the 'PorterStemFilter' to be applied on the query for the 'text' field", tokenList);
assertEquals("Query has only one token", 1, tokenList.size());
assertToken(tokenList.get(0), new TokenInfo("jump", null, "<ALPHANUM>", 0, 7, 1, null, false));
@@ -231,7 +231,7 @@ public class DocumentAnalysisRequestHandlerTest extends AnalysisRequestHandlerTe
assertToken(tokenList.get(1), new TokenInfo("jumped", null, "<ALPHANUM>", 8, 14, 2, null, false));
assertToken(tokenList.get(2), new TokenInfo("over", null, "<ALPHANUM>", 15, 19, 3, null, false));
assertToken(tokenList.get(3), new TokenInfo("dogs", null, "<ALPHANUM>", 24, 28, 4, null, false));
-tokenList = valueResult.get("org.apache.lucene.analysis.PorterStemFilter");
+tokenList = valueResult.get("org.apache.lucene.analysis.en.PorterStemFilter");
assertNotNull("Expecting the 'PorterStemFilter' to be applied on the index for the 'text' field", tokenList);
assertEquals("Expecting 4 tokens", 4, tokenList.size());
assertToken(tokenList.get(0), new TokenInfo("fox", null, "<ALPHANUM>", 4, 7, 1, null, false));

View File

@@ -173,7 +173,7 @@ public class FieldAnalysisRequestHandlerTest extends AnalysisRequestHandlerTestB
assertToken(tokenList.get(5), new TokenInfo("lazy", null, "<ALPHANUM>", 34, 38, 6, null, false));
assertToken(tokenList.get(6), new TokenInfo("brown", null, "<ALPHANUM>", 39, 44, 7, null, true));
assertToken(tokenList.get(7), new TokenInfo("dogs", null, "<ALPHANUM>", 45, 49, 8, null, false));
-tokenList = indexPart.get("org.apache.lucene.analysis.PorterStemFilter");
+tokenList = indexPart.get("org.apache.lucene.analysis.en.PorterStemFilter");
assertNotNull("Expcting PorterStemFilter analysis breakdown", tokenList);
assertEquals(tokenList.size(), 8);
assertToken(tokenList.get(0), new TokenInfo("quick", null, "<ALPHANUM>", 4, 9, 1, null, false));
@@ -208,7 +208,7 @@ public class FieldAnalysisRequestHandlerTest extends AnalysisRequestHandlerTestB
assertEquals(2, tokenList.size());
assertToken(tokenList.get(0), new TokenInfo("fox", null, "<ALPHANUM>", 0, 3, 1, null, false));
assertToken(tokenList.get(1), new TokenInfo("brown", null, "<ALPHANUM>", 4, 9, 2, null, false));
-tokenList = queryPart.get("org.apache.lucene.analysis.PorterStemFilter");
+tokenList = queryPart.get("org.apache.lucene.analysis.en.PorterStemFilter");
assertNotNull("Expcting PorterStemFilter analysis breakdown", tokenList);
assertEquals(2, tokenList.size());
assertToken(tokenList.get(0), new TokenInfo("fox", null, "<ALPHANUM>", 0, 3, 1, null, false));