SOLR-1677: More factories converted to use matchVersion

git-svn-id: https://svn.apache.org/repos/asf/lucene/solr/branches/solr@923670 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Uwe Schindler 2010-03-16 10:36:22 +00:00
parent cf770dbde8
commit 1902d9855e
13 changed files with 32 additions and 18 deletions

View File

@@ -29,6 +29,7 @@ import java.io.Reader;
 public class ArabicLetterTokenizerFactory extends BaseTokenizerFactory{
   public ArabicLetterTokenizer create(Reader input) {
-    return new ArabicLetterTokenizer(input);
+    assureMatchVersion();
+    return new ArabicLetterTokenizer(luceneMatchVersion, input);
   }
 }

View File

@@ -56,7 +56,8 @@ public class ElisionFilterFactory extends BaseTokenFilterFactory implements Reso
   }
   public ElisionFilter create(TokenStream input) {
-    return new ElisionFilter(input,articles);
+    assureMatchVersion();
+    return new ElisionFilter(luceneMatchVersion,input,articles);
   }
 }

View File

@@ -27,6 +27,7 @@ import java.io.Reader;
  */
 public class LetterTokenizerFactory extends BaseTokenizerFactory {
   public LetterTokenizer create(Reader input) {
-    return new LetterTokenizer(input);
+    assureMatchVersion();
+    return new LetterTokenizer(luceneMatchVersion, input);
   }
 }

View File

@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.LowerCaseFilter;
  */
 public class LowerCaseFilterFactory extends BaseTokenFilterFactory {
   public LowerCaseFilter create(TokenStream input) {
-    return new LowerCaseFilter(input);
+    assureMatchVersion();
+    return new LowerCaseFilter(luceneMatchVersion,input);
   }
 }

View File

@@ -27,6 +27,7 @@ import java.io.Reader;
  */
 public class LowerCaseTokenizerFactory extends BaseTokenizerFactory {
   public LowerCaseTokenizer create(Reader input) {
-    return new LowerCaseTokenizer(input);
+    assureMatchVersion();
+    return new LowerCaseTokenizer(luceneMatchVersion,input);
   }
 }

View File

@@ -28,7 +28,8 @@ import org.apache.lucene.analysis.reverse.ReverseStringFilter;
  */
 public class ReverseStringFilterFactory extends BaseTokenFilterFactory {
   public ReverseStringFilter create(TokenStream in) {
-    return new ReverseStringFilter(in);
+    assureMatchVersion();
+    return new ReverseStringFilter(luceneMatchVersion,in);
   }
 }

View File

@@ -36,7 +36,8 @@ public class RussianLetterTokenizerFactory extends BaseTokenizerFactory {
   }
   public RussianLetterTokenizer create(Reader in) {
-    return new RussianLetterTokenizer(in);
+    assureMatchVersion();
+    return new RussianLetterTokenizer(luceneMatchVersion,in);
   }
 }

View File

@@ -78,7 +78,9 @@ public class StopFilterFactory extends BaseTokenFilterFactory implements Resourc
   }
   public StopFilter create(TokenStream input) {
-    StopFilter stopFilter = new StopFilter(enablePositionIncrements,input,stopWords,ignoreCase);
+    assureMatchVersion();
+    StopFilter stopFilter = new StopFilter(luceneMatchVersion,input,stopWords,ignoreCase);
+    stopFilter.setEnablePositionIncrements(enablePositionIncrements);
     return stopFilter;
   }
 }

View File

@@ -27,6 +27,7 @@ import java.io.Reader;
  */
 public class WhitespaceTokenizerFactory extends BaseTokenizerFactory {
   public WhitespaceTokenizer create(Reader input) {
-    return new WhitespaceTokenizer(input);
+    assureMatchVersion();
+    return new WhitespaceTokenizer(luceneMatchVersion,input);
   }
 }

View File

@@ -33,6 +33,7 @@ public class TestArabicFilters extends BaseTokenTestCase {
   public void testTokenizer() throws Exception {
     Reader reader = new StringReader("الذين مَلكت أيمانكم");
     ArabicLetterTokenizerFactory factory = new ArabicLetterTokenizerFactory();
+    factory.init(DEFAULT_VERSION_PARAM);
     Tokenizer stream = factory.create(reader);
     assertTokenStreamContents(stream, new String[] {"الذين", "مَلكت", "أيمانكم"});
   }
@@ -44,6 +45,8 @@ public class TestArabicFilters extends BaseTokenTestCase {
     Reader reader = new StringReader("الذين مَلكت أيمانكم");
     ArabicLetterTokenizerFactory factory = new ArabicLetterTokenizerFactory();
     ArabicNormalizationFilterFactory filterFactory = new ArabicNormalizationFilterFactory();
+    factory.init(DEFAULT_VERSION_PARAM);
+    filterFactory.init(DEFAULT_VERSION_PARAM);
     Tokenizer tokenizer = factory.create(reader);
     TokenStream stream = filterFactory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] {"الذين", "ملكت", "ايمانكم"});
@@ -57,6 +60,8 @@ public class TestArabicFilters extends BaseTokenTestCase {
     ArabicLetterTokenizerFactory factory = new ArabicLetterTokenizerFactory();
     ArabicNormalizationFilterFactory normFactory = new ArabicNormalizationFilterFactory();
     ArabicStemFilterFactory stemFactory = new ArabicStemFilterFactory();
+    factory.init(DEFAULT_VERSION_PARAM);
+    normFactory.init(DEFAULT_VERSION_PARAM);
     Tokenizer tokenizer = factory.create(reader);
     TokenStream stream = normFactory.create(tokenizer);
     stream = stemFactory.create(stream);

View File

@@ -39,6 +39,7 @@ public class TestElisionFilterFactory extends BaseTokenTestCase {
     Reader reader = new StringReader("l'avion");
     Tokenizer tokenizer = new WhitespaceTokenizer(reader);
     ElisionFilterFactory factory = new ElisionFilterFactory();
+    factory.init(DEFAULT_VERSION_PARAM);
     ResourceLoader loader = new SolrResourceLoader(null, null);
     Map<String,String> args = new HashMap<String,String>();
     args.put("articles", "frenchArticles.txt");

View File

@@ -35,6 +35,7 @@ public class TestReverseStringFilterFactory extends BaseTokenTestCase {
     Reader reader = new StringReader("simple test");
     Tokenizer tokenizer = new WhitespaceTokenizer(reader);
     ReverseStringFilterFactory factory = new ReverseStringFilterFactory();
+    factory.init(DEFAULT_VERSION_PARAM);
     TokenStream stream = factory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] { "elpmis", "tset" });
   }

View File

@@ -34,9 +34,8 @@ public class TestRussianFilters extends BaseTokenTestCase {
    */
   public void testTokenizer() throws Exception {
     Reader reader = new StringReader("Вместе с тем о силе электромагнитной 100");
-    Map<String,String> args = new HashMap<String,String>();
     RussianLetterTokenizerFactory factory = new RussianLetterTokenizerFactory();
-    factory.init(args);
+    factory.init(DEFAULT_VERSION_PARAM);
     Tokenizer stream = factory.create(reader);
     assertTokenStreamContents(stream, new String[] {"Вместе", "с", "тем", "о",
         "силе", "электромагнитной", "100"});
@@ -47,11 +46,10 @@ public class TestRussianFilters extends BaseTokenTestCase {
    */
   public void testLowerCase() throws Exception {
     Reader reader = new StringReader("Вместе с тем о силе электромагнитной 100");
-    Map<String,String> args = new HashMap<String,String>();
     RussianLetterTokenizerFactory factory = new RussianLetterTokenizerFactory();
-    factory.init(args);
+    factory.init(DEFAULT_VERSION_PARAM);
     RussianLowerCaseFilterFactory filterFactory = new RussianLowerCaseFilterFactory();
-    filterFactory.init(args);
+    filterFactory.init(DEFAULT_VERSION_PARAM);
     Tokenizer tokenizer = factory.create(reader);
     TokenStream stream = filterFactory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] {"вместе", "с", "тем", "о",
@@ -63,13 +61,12 @@ public class TestRussianFilters extends BaseTokenTestCase {
    */
   public void testStemmer() throws Exception {
     Reader reader = new StringReader("Вместе с тем о силе электромагнитной 100");
-    Map<String,String> args = new HashMap<String,String>();
     RussianLetterTokenizerFactory factory = new RussianLetterTokenizerFactory();
-    factory.init(args);
+    factory.init(DEFAULT_VERSION_PARAM);
     RussianLowerCaseFilterFactory caseFactory = new RussianLowerCaseFilterFactory();
-    caseFactory.init(args);
+    caseFactory.init(DEFAULT_VERSION_PARAM);
     RussianStemFilterFactory stemFactory = new RussianStemFilterFactory();
-    stemFactory.init(args);
+    stemFactory.init(DEFAULT_VERSION_PARAM);
     Tokenizer tokenizer = factory.create(reader);
     TokenStream stream = caseFactory.create(tokenizer);
     stream = stemFactory.create(stream);