use MockTokenizer instead of WhitespaceTokenizer for better testing

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1303382 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2012-03-21 13:10:38 +00:00
parent 3a4318dd06
commit fb395f66a3
3 changed files with 5 additions and 7 deletions
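
All three tests make the same substitution: the WhitespaceTokenizer in each test Analyzer is replaced with a MockTokenizer configured to split on whitespace. MockTokenizer produces the same tokens, but it also asserts the TokenStream consumer contract (reset() before incrementToken(), then end() and close()), so a filter or test that mishandles the stream fails instead of passing silently. Below is a minimal sketch of the resulting pattern against the Lucene 4.x-era test API shown in these diffs; the class name, test method, and input string are illustrative, not part of this commit.

import java.io.Reader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;

public class ExampleMockTokenizerTest extends BaseTokenStreamTestCase {
  public void testWhitespaceTokens() throws Exception {
    Analyzer a = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String fieldName, Reader reader) {
        // MockTokenizer.WHITESPACE splits tokens on whitespace, matching
        // WhitespaceTokenizer; 'false' disables lowercasing. Unlike the real
        // tokenizer, MockTokenizer asserts correct reset()/end()/close() usage.
        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
        return new TokenStreamComponents(tokenizer);
      }
    };
    // assertAnalyzesTo (from BaseTokenStreamTestCase) consumes the stream fully,
    // triggering MockTokenizer's lifecycle checks along the way.
    assertAnalyzesTo(a, "Hello World", new String[] { "Hello", "World" });
  }
}

The third constructor argument (false) keeps WhitespaceTokenizer's behavior of leaving case untouched, so the existing test expectations are unchanged.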

TestICUFoldingFilter.java

@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.io.Reader;
 import org.apache.lucene.analysis.*;
-import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 /**
  * Tests ICUFoldingFilter
@@ -30,7 +29,7 @@ public class TestICUFoldingFilter extends BaseTokenStreamTestCase {
   Analyzer a = new Analyzer() {
     @Override
     public TokenStreamComponents createComponents(String fieldName, Reader reader) {
-      Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+      Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
       return new TokenStreamComponents(tokenizer, new ICUFoldingFilter(tokenizer));
     }
   };

TestICUNormalizer2Filter.java

@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.io.Reader;
 import org.apache.lucene.analysis.*;
-import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import com.ibm.icu.text.Normalizer2;
@@ -32,7 +31,7 @@ public class TestICUNormalizer2Filter extends BaseTokenStreamTestCase {
   Analyzer a = new Analyzer() {
     @Override
     public TokenStreamComponents createComponents(String fieldName, Reader reader) {
-      Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+      Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
       return new TokenStreamComponents(tokenizer, new ICUNormalizer2Filter(tokenizer));
     }
   };
@@ -62,7 +61,7 @@ public class TestICUNormalizer2Filter extends BaseTokenStreamTestCase {
   Analyzer a = new Analyzer() {
     @Override
     public TokenStreamComponents createComponents(String fieldName, Reader reader) {
-      Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+      Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
       return new TokenStreamComponents(tokenizer, new ICUNormalizer2Filter(
           tokenizer,
           /* specify nfc with decompose to get nfd */

TestICUTransformFilter.java

@@ -23,9 +23,9 @@ import java.io.StringReader;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
-import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import com.ibm.icu.text.Transliterator;
@@ -94,7 +94,7 @@ public class TestICUTransformFilter extends BaseTokenStreamTestCase {
   Analyzer a = new Analyzer() {
     @Override
     protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
-      Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+      Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
       return new TokenStreamComponents(tokenizer, new ICUTransformFilter(tokenizer, transform));
     }
   };