LUCENE-2413: switch over more tests to use MockAnalyzer

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@944677 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2010-05-15 16:34:07 +00:00
parent 76fe0f3c93
commit d23eb64bd7
26 changed files with 134 additions and 67 deletions
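
Editor's note: the substitutions below are mechanical. Every concrete analyzer or tokenizer from the analysis module is replaced by the test-only MockAnalyzer/MockTokenizer, configured with a CharacterRunAutomaton constant and a lowercasing flag. A hedged sketch of the three recurring swaps, using only constructors that appear in the hunks below (the demo class itself is hypothetical, not part of the commit):

    import java.io.Reader;
    import java.io.StringReader;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.analysis.MockTokenizer;
    import org.apache.lucene.analysis.TokenStream;

    public class MockAnalyzerSwapDemo {
      public static void main(String[] args) throws Exception {
        Reader reader = new StringReader("The Quick Brown Fox");

        // was: new SimpleAnalyzer(TEST_VERSION_CURRENT) -- letter runs, lowercased
        Analyzer simple = new MockAnalyzer(MockAnalyzer.SIMPLE, true);

        // was: new KeywordAnalyzer() -- the entire input as a single token
        Analyzer keyword = new MockAnalyzer(MockAnalyzer.KEYWORD, false);

        // no-arg form: whitespace tokenization with lowercasing (the default)
        Analyzer whitespace = new MockAnalyzer();

        // was: new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader)
        TokenStream stream = new MockTokenizer(reader, MockAnalyzer.SIMPLE, true);
      }
    }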


@@ -18,7 +18,7 @@ package org.apache.lucene.queryParser.ext;
  */

 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.queryParser.TestQueryParser;
@@ -46,7 +46,7 @@ public class TestExtendableQueryParser extends TestQueryParser {
   public QueryParser getParser(Analyzer a, Extensions extensions)
       throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+      a = new MockAnalyzer(MockAnalyzer.SIMPLE, true);
     QueryParser qp = extensions == null ? new ExtendableQueryParser(
         TEST_VERSION_CURRENT, "field", a) : new ExtendableQueryParser(
         TEST_VERSION_CURRENT, "field", a, extensions);


@@ -18,7 +18,8 @@ package org.apache.lucene.queryParser.precedence;
  */

 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
@@ -96,10 +97,10 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
   public static final class QPTestAnalyzer extends Analyzer {
-    /** Filters LowerCaseTokenizer with StopFilter. */
+    /** Filters MockTokenizer with StopFilter. */
     @Override
     public final TokenStream tokenStream(String fieldName, Reader reader) {
-      return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
+      return new QPTestFilter(new MockTokenizer(reader, MockAnalyzer.SIMPLE, true));
     }
   }
@@ -129,7 +130,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
   public PrecedenceQueryParser getParser(Analyzer a) throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+      a = new MockAnalyzer(MockAnalyzer.SIMPLE, true);
     PrecedenceQueryParser qp = new PrecedenceQueryParser("field", a);
     qp.setDefaultOperator(PrecedenceQueryParser.OR_OPERATOR);
     return qp;
@@ -174,7 +175,7 @@ public class TestPrecedenceQueryParser extends LocalizedTestCase {
   public Query getQueryDOA(String query, Analyzer a)
       throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+      a = new MockAnalyzer(MockAnalyzer.SIMPLE, true);
     PrecedenceQueryParser qp = new PrecedenceQueryParser("field", a);
     qp.setDefaultOperator(PrecedenceQueryParser.AND_OPERATOR);
     return qp.parse(query);


@@ -34,7 +34,8 @@ import java.util.Collections;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.KeywordAnalyzer;
-import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.analysis.StopAnalyzer;
 import org.apache.lucene.analysis.StopFilter;
@@ -140,10 +141,10 @@ public class TestQPHelper extends LocalizedTestCase {
   public static final class QPTestAnalyzer extends Analyzer {
-    /** Filters LowerCaseTokenizer with StopFilter. */
+    /** Filters MockTokenizer with StopFilter. */
     @Override
     public final TokenStream tokenStream(String fieldName, Reader reader) {
-      return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
+      return new QPTestFilter(new MockTokenizer(reader, MockAnalyzer.SIMPLE, true));
     }
   }
@@ -203,7 +204,7 @@ public class TestQPHelper extends LocalizedTestCase {
   public StandardQueryParser getParser(Analyzer a) throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+      a = new MockAnalyzer(MockAnalyzer.SIMPLE, true);
     StandardQueryParser qp = new StandardQueryParser();
     qp.setAnalyzer(a);
@@ -293,7 +294,7 @@ public class TestQPHelper extends LocalizedTestCase {
   public Query getQueryDOA(String query, Analyzer a) throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+      a = new MockAnalyzer(MockAnalyzer.SIMPLE, true);
     StandardQueryParser qp = new StandardQueryParser();
     qp.setAnalyzer(a);
     qp.setDefaultOperator(Operator.AND);


@@ -32,7 +32,8 @@ import java.util.Collections;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.KeywordAnalyzer;
-import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.analysis.StopAnalyzer;
 import org.apache.lucene.analysis.StopFilter;
@@ -137,10 +138,10 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
   public static final class QPTestAnalyzer extends Analyzer {
-    /** Filters LowerCaseTokenizer with StopFilter. */
+    /** Filters MockTokenizer with StopFilter. */
     @Override
     public final TokenStream tokenStream(String fieldName, Reader reader) {
-      return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
+      return new QPTestFilter(new MockTokenizer(reader, MockAnalyzer.SIMPLE, true));
     }
   }
@@ -218,7 +219,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
   public QueryParserWrapper getParser(Analyzer a) throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+      a = new MockAnalyzer(MockAnalyzer.SIMPLE, true);
     QueryParserWrapper qp = new QueryParserWrapper("field", a);
     qp.setDefaultOperator(QueryParserWrapper.OR_OPERATOR);
     return qp;
@@ -303,7 +304,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
   public Query getQueryDOA(String query, Analyzer a) throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+      a = new MockAnalyzer(MockAnalyzer.SIMPLE, true);
     QueryParserWrapper qp = new QueryParserWrapper("field", a);
     qp.setDefaultOperator(QueryParserWrapper.AND_OPERATOR);
     return qp.parse(query);
@@ -553,7 +554,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
     assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
     QueryParserWrapper qp = new QueryParserWrapper("field",
-        new SimpleAnalyzer(TEST_VERSION_CURRENT));
+        new MockAnalyzer(MockAnalyzer.SIMPLE, true));
     qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
     assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod());
@@ -684,7 +685,7 @@ public class TestQueryParserWrapper extends LocalizedTestCase {
     final String monthField = "month";
     final String hourField = "hour";
     QueryParserWrapper qp = new QueryParserWrapper("field",
-        new SimpleAnalyzer(TEST_VERSION_CURRENT));
+        new MockAnalyzer(MockAnalyzer.SIMPLE, true));
     // Don't set any date resolution and verify if DateField is used
     assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,


@@ -27,10 +27,18 @@ import org.apache.lucene.util.automaton.RegExp;
  * Analyzer for testing
  */
 public final class MockAnalyzer extends Analyzer {
+  /** Acts Similar to WhitespaceAnalyzer */
   public static final CharacterRunAutomaton WHITESPACE =
     new CharacterRunAutomaton(new RegExp("[^ \t\r\n]+").toAutomaton());
+  /** Acts Similar to KeywordAnalyzer.
+   * TODO: Keyword returns an "empty" token for an empty reader...
+   */
   public static final CharacterRunAutomaton KEYWORD =
     new CharacterRunAutomaton(new RegExp(".*").toAutomaton());
+  /** Acts like SimpleAnalyzer/LetterTokenizer. */
+  // the ugly regex below is Unicode 5.2 [:Letter:]
+  public static final CharacterRunAutomaton SIMPLE =
new CharacterRunAutomaton(new RegExp("[A-Za-zªµºÀ-ÖØ-öø-ˁˆ-ˑˠ-ˤˬˮͰ-ʹͶͷͺ-ͽΆΈ-ΊΌΎ-ΡΣ-ϵϷ-ҁҊ-ԥԱ-Ֆՙա-ևא-תװ-ײء-يٮٯٱ-ۓەۥۦۮۯۺ-ۼۿܐܒ-ܯݍ-ޥޱߊ-ߪߴߵߺࠀ-ࠕࠚࠤࠨऄ-हऽॐक़-ॡॱॲॹ-ॿঅ-ঌএঐও-নপ-রলশ-হঽৎড়ঢ়য়-ৡৰৱਅ-ਊਏਐਓ-ਨਪ-ਰਲਲ਼ਵਸ਼ਸਹਖ਼-ੜਫ਼ੲ-ੴઅ-ઍએ-ઑઓ-નપ-રલળવ-હઽૐૠૡଅ-ଌଏଐଓ-ନପ-ରଲଳଵ-ହଽଡ଼ଢ଼ୟ-ୡୱஃஅ-ஊஎ-ஐஒ-கஙசஜஞடணதந-பம-ஹௐఅ-ఌఎ-ఐఒ-నప-ళవ-హఽౘౙౠౡಅ-ಌಎ-ಐಒ-ನಪ-ಳವ-ಹಽೞೠೡഅ-ഌഎ-ഐഒ-നപ-ഹഽൠൡൺ-ൿඅ-ඖක-නඳ-රලව-ෆก-ะาำเ-ๆກຂຄງຈຊຍດ-ທນ-ຟມ-ຣລວສຫອ-ະາຳຽເ-ໄໆໜໝༀཀ-ཇཉ-ཬྈ-ྋက-ဪဿၐ-ၕၚ-ၝၡၥၦၮ-ၰၵ-ႁႎႠ-Ⴥა-ჺჼᄀ-ቈቊ-ቍቐ-ቖቘቚ-ቝበ-ኈኊ-ኍነ-ኰኲ-ኵኸ-ኾዀዂ-ዅወ-ዖዘ-ጐጒ-ጕጘ-ፚᎀ-ᎏᎠ-Ᏼᐁ-ᙬᙯ-ᙿᚁ-ᚚᚠ-ᛪᜀ-ᜌᜎ-ᜑᜠ-ᜱᝀ-ᝑᝠ-ᝬᝮ-ᝰក-ឳៗៜᠠ-ᡷᢀ-ᢨᢪᢰ-ᣵᤀ-ᤜᥐ-ᥭᥰ-ᥴᦀ-ᦫᧁ-ᧇᨀ-ᨖᨠ-ᩔᪧᬅ-ᬳᭅ-ᭋᮃ-ᮠᮮᮯᰀ-ᰣᱍ-ᱏᱚ-ᱽᳩ-ᳬᳮ-ᳱᴀ-ᶿḀ-ἕἘ-Ἕἠ-ὅὈ-Ὅὐ-ὗὙὛὝὟ-ώᾀ-ᾴᾶ-ᾼιῂ-ῄῆ-ῌῐ-ΐῖ-Ίῠ-Ῥῲ-ῴῶ-ῼⁱⁿₐ-ₔℂℇℊ--ℝℤΩℨK--ℹℼ-ℿⅅ-ⅉⅎↃↄⰀ-Ⱞⰰ-ⱞⱠ-ⳤⳫ-ⳮⴀ-ⴥⴰ-ⵥⵯⶀ-ⶖⶠ-ⶦⶨ-ⶮⶰ-ⶶⶸ-ⶾⷀ-ⷆⷈ-ⷎⷐ-ⷖⷘ-ⷞⸯ々〆〱-〵〻〼ぁ-ゖゝ-ゟァ-ヺー-ヿㄅ-ㄭㄱ-ㆎㆠ-ㆷㇰ-ㇿ㐀-䶵一-鿋ꀀ-ꒌꓐ-ꓽꔀ-ꘌꘐ-ꘟꘪꘫꙀ-ꙟꙢ-ꙮꙿ-ꚗꚠ-ꛥꜗ-ꜟꜢ-ꞈꞋꞌꟻ-ꠁꠃ-ꠅꠇ-ꠊꠌ-ꠢꡀ-ꡳꢂ-ꢳꣲ-ꣷꣻꤊ-ꤥꤰ-ꥆꥠ-ꥼꦄ-ꦲꧏꨀ-ꨨꩀ-ꩂꩄ-ꩋꩠ-ꩶꩺꪀ-ꪯꪱꪵꪶꪹ-ꪽꫀꫂꫛ-ꫝꯀ-ꯢ가-힣ힰ-ퟆퟋ-ퟻ豈-鶴侮-舘並-龎ff-stﬓ-ﬗיִײַ-ﬨשׁ-זּטּ-לּמּנּסּףּפּצּ-ﮱﯓ-ﴽﵐ-ﶏﶒ-ﷇﷰ-ﷻﹰ-ﹴﹶ-ﻼA--zヲ-하-ᅦᅧ-ᅬᅭ-ᅲᅳ-ᅵ𐀀-𐀋𐀍-𐀦𐀨-𐀺𐀼𐀽𐀿-𐁍𐁐-𐁝𐂀-𐃺𐊀-𐊜𐊠-𐋐𐌀-𐌞𐌰-𐍀𐍂-𐍉𐎀-𐎝𐎠-𐏃𐏈-𐏏𐐀-𐒝𐠀-𐠅𐠈𐠊-𐠵𐠷𐠸𐠼𐠿-𐡕𐤀-𐤕𐤠-𐤹𐨀𐨐-𐨓𐨕-𐨗𐨙-𐨳𐩠-𐩼𐬀-𐬵𐭀-𐭕𐭠-𐭲𐰀-𐱈𑂃-𑂯𒀀-𒍮𓀀-𓐮𝐀-𝑔𝑖-𝒜𝒞𝒟𝒢𝒥𝒦𝒩-𝒬𝒮-𝒹𝒻𝒽-𝓃𝓅-𝔅𝔇-𝔊𝔍-𝔔𝔖-𝔜𝔞-𝔹𝔻-𝔾𝕀-𝕄𝕆𝕊-𝕐𝕒-𝚥𝚨-𝛀𝛂-𝛚𝛜-𝛺𝛼-𝜔𝜖-𝜴𝜶-𝝎𝝐-𝝮𝝰-𝞈𝞊-𝞨𝞪-𝟂𝟄-𝟋𠀀-𪛖𪜀-𫜴丽-𪘀]+").toAutomaton());

   private final CharacterRunAutomaton runAutomaton;
   private final boolean lowerCase;
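
Editor's note: each constant above compiles a RegExp into a DFA; MockTokenizer then emits maximal runs of characters accepted by that automaton, optionally lowercased. A minimal sketch of defining a custom token shape the same way (the DIGITS constant is illustrative, not part of this commit):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.util.automaton.CharacterRunAutomaton;
    import org.apache.lucene.util.automaton.RegExp;

    public class DigitsAnalyzerSketch {
      // tokens are maximal runs of ASCII digits, built with the same
      // recipe as WHITESPACE/KEYWORD/SIMPLE above
      static final CharacterRunAutomaton DIGITS =
          new CharacterRunAutomaton(new RegExp("[0-9]+").toAutomaton());

      public static Analyzer create() {
        return new MockAnalyzer(DIGITS, false); // false: no lowercasing
      }
    }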


@@ -0,0 +1,51 @@
+package org.apache.lucene.analysis;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestMockAnalyzer extends BaseTokenStreamTestCase {
+
+  public void testWhitespace() throws Exception {
+    Analyzer a = new MockAnalyzer();
+    assertAnalyzesTo(a, "A bc defg hiJklmn opqrstuv wxy z ",
+        new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" });
+    assertAnalyzesToReuse(a, "aba cadaba shazam",
+        new String[] { "aba", "cadaba", "shazam" });
+    assertAnalyzesToReuse(a, "break on whitespace",
+        new String[] { "break", "on", "whitespace" });
+  }
+
+  public void testSimple() throws Exception {
+    Analyzer a = new MockAnalyzer(MockAnalyzer.SIMPLE, true);
+    assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
+        new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" });
+    assertAnalyzesToReuse(a, "aba4cadaba-Shazam",
+        new String[] { "aba", "cadaba", "shazam" });
+    assertAnalyzesToReuse(a, "break+on/Letters",
+        new String[] { "break", "on", "letters" });
+  }
+
+  public void testKeyword() throws Exception {
+    Analyzer a = new MockAnalyzer(MockAnalyzer.KEYWORD, false);
+    assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ",
+        new String[] { "a-bc123 defg+hijklmn567opqrstuv78wxy_z " });
+    assertAnalyzesToReuse(a, "aba4cadaba-Shazam",
+        new String[] { "aba4cadaba-Shazam" });
+    assertAnalyzesToReuse(a, "break+on/Nothing",
+        new String[] { "break+on/Nothing" });
+  }
+}
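
Editor's note: the new test pins down the behavior of all three automata; the remaining hunks simply drop the mock in wherever a test builds an index. A minimal indexing sketch in the same spirit (TEST_VERSION_CURRENT belongs to LuceneTestCase, so Version.LUCENE_CURRENT stands in for it here):

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class MockAnalyzerIndexingSketch {
      public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        // whitespace-tokenizing, lowercasing mock in place of a real analyzer
        IndexWriter writer = new IndexWriter(dir,
            new IndexWriterConfig(Version.LUCENE_CURRENT, new MockAnalyzer()));
        Document doc = new Document();
        doc.add(new Field("field", "some content here",
            Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);
        writer.close();
      }
    }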


@@ -30,7 +30,6 @@ import java.util.Map;
 import java.util.HashMap;
 import java.util.Set;

-import org.apache.lucene.analysis.KeywordAnalyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -172,7 +171,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
   private void doTestReopenWithCommit (Directory dir, boolean withReopen) throws IOException {
     IndexWriter iwriter = new IndexWriter(dir, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new KeywordAnalyzer()).setOpenMode(
+        TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(
         OpenMode.CREATE).setMergeScheduler(new SerialMergeScheduler()));
     iwriter.commit();
     IndexReader reader = IndexReader.open(dir, false);


@@ -21,7 +21,8 @@ import java.io.IOException;
 import java.io.Reader;

 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
@@ -100,7 +101,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
   private static class PayloadAnalyzer extends Analyzer {
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
-      return new PayloadFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
+      return new PayloadFilter(new MockTokenizer(reader, MockAnalyzer.WHITESPACE, true));
     }
   }


@@ -17,7 +17,7 @@ package org.apache.lucene.index;
  * limitations under the License.
  */

-import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.store.RAMDirectory;
@@ -36,7 +36,7 @@ import org.apache.lucene.util.LuceneTestCase;
 public class TestTermEnumSurrogate extends LuceneTestCase {
   public void testSeekSurrogate() throws Exception {
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(),
+    IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(),
         IndexWriter.MaxFieldLength.UNLIMITED);
     Document d = new Document();
     Field f = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED);


@@ -17,7 +17,7 @@ package org.apache.lucene.index;
  * limitations under the License.
  */

-import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
@@ -35,7 +35,7 @@ import java.io.File;
 public class TestThreadedOptimize extends LuceneTestCase {

-  private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+  private static final Analyzer ANALYZER = new MockAnalyzer(MockAnalyzer.SIMPLE, true);

   private final static int NUM_THREADS = 3;
   //private final static int NUM_THREADS = 5;


@@ -31,8 +31,8 @@ import java.util.Set;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.KeywordAnalyzer;
-import org.apache.lucene.analysis.LowerCaseTokenizer;
-import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.StopAnalyzer;
 import org.apache.lucene.analysis.StopFilter;
 import org.apache.lucene.analysis.TokenFilter;
@@ -125,10 +125,10 @@ public class TestQueryParser extends LocalizedTestCase {
   public static final class QPTestAnalyzer extends Analyzer {
-    /** Filters LowerCaseTokenizer with StopFilter. */
+    /** Filters MockTokenizer with StopFilter. */
     @Override
     public final TokenStream tokenStream(String fieldName, Reader reader) {
-      return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
+      return new QPTestFilter(new MockTokenizer(reader, MockAnalyzer.SIMPLE, true));
     }
   }
@@ -158,7 +158,7 @@ public class TestQueryParser extends LocalizedTestCase {
   public QueryParser getParser(Analyzer a) throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+      a = new MockAnalyzer(MockAnalyzer.SIMPLE, true);
     QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
     qp.setDefaultOperator(QueryParser.OR_OPERATOR);
     return qp;
@@ -228,7 +228,7 @@ public class TestQueryParser extends LocalizedTestCase {
   public Query getQueryDOA(String query, Analyzer a)
       throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+      a = new MockAnalyzer(MockAnalyzer.SIMPLE, true);
     QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
     qp.setDefaultOperator(QueryParser.AND_OPERATOR);
     return qp.parse(query);
@@ -456,7 +456,7 @@ public class TestQueryParser extends LocalizedTestCase {
     assertQueryEquals("[ a TO z]", null, "[a TO z]");
     assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new SimpleAnalyzer(TEST_VERSION_CURRENT));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(MockAnalyzer.SIMPLE, true));
     qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
     assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod());
@@ -579,7 +579,7 @@ public class TestQueryParser extends LocalizedTestCase {
     final String defaultField = "default";
     final String monthField = "month";
     final String hourField = "hour";
-    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new SimpleAnalyzer(TEST_VERSION_CURRENT));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(MockAnalyzer.SIMPLE, true));
     // Don't set any date resolution and verify if DateField is used
     assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,


@@ -19,7 +19,7 @@ package org.apache.lucene.search;
 import java.io.IOException;

-import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
@@ -42,7 +42,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
     RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new KeywordAnalyzer(), true,
+    IndexWriter writer = new IndexWriter(directory, new MockAnalyzer(), true,
         IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     Field titleField = new Field("title", "some title", Field.Store.NO,


@@ -19,7 +19,7 @@ package org.apache.lucene.search;
 import org.apache.lucene.util.LuceneTestCase;

-import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
@@ -39,7 +39,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase {
   public void testMissingTerms() throws Exception {
     String fieldName = "field1";
     MockRAMDirectory rd = new MockRAMDirectory();
-    IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer()));
+    IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     for (int i = 0; i < 100; i++) {
       Document doc = new Document();
       int term = i * 10; //terms are units of 10;


@@ -21,7 +21,7 @@ import java.io.BufferedReader;
 import java.io.InputStream;
 import java.io.InputStreamReader;

-import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
@@ -77,7 +77,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase {
     int terms = (int) Math.pow(2, bits);
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(),
+    IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(MockAnalyzer.KEYWORD, false),
         IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();


@@ -18,7 +18,6 @@ package org.apache.lucene.search;
  */

 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.analysis.KeywordAnalyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -222,7 +221,7 @@ public class TestMultiSearcher extends LuceneTestCase
     try {
       indexWriter = new IndexWriter(directory, new IndexWriterConfig(
-          TEST_VERSION_CURRENT, new KeywordAnalyzer()).setOpenMode(
+          TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(
           create ? OpenMode.CREATE : OpenMode.APPEND));

       for (int i=0; i<nDocs; i++) {


@@ -17,6 +17,7 @@ package org.apache.lucene.search;
  * limitations under the License.
  */

+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
@@ -616,7 +617,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
     /* build an index */
     RAMDirectory farsiIndex = new RAMDirectory();
     IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
+        TEST_VERSION_CURRENT, new MockAnalyzer(MockAnalyzer.SIMPLE, true)));
     Document doc = new Document();
     doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
         Field.Index.NOT_ANALYZED));
@@ -656,7 +657,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
     /* build an index */
     RAMDirectory danishIndex = new RAMDirectory();
     IndexWriter writer = new IndexWriter(danishIndex, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
+        TEST_VERSION_CURRENT, new MockAnalyzer(MockAnalyzer.SIMPLE, true)));

     // Danish collation orders the words below in the given order
     // (example taken from TestSort.testInternationalSort() ).


@@ -25,6 +25,7 @@ import java.util.Collections;
 import java.util.Iterator;

 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.StopFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.MockAnalyzer;
@@ -333,7 +334,7 @@ final class TestPayloadAnalyzer extends Analyzer {
   @Override
   public TokenStream tokenStream(String fieldName, Reader reader) {
-    TokenStream result = new LowerCaseTokenizer(LuceneTestCase.TEST_VERSION_CURRENT, reader);
+    TokenStream result = new MockTokenizer(reader, MockAnalyzer.WHITESPACE, true);
     return new PayloadFilter(result, fieldName);
   }
 }


@@ -21,7 +21,7 @@ import java.text.DecimalFormat;
 import java.text.NumberFormat;
 import java.util.Random;

-import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
@@ -43,7 +43,7 @@ public class TestRegexpRandom extends LuceneTestCase {
   protected void setUp() throws Exception {
     super.setUp();
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     Field field = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED);


@@ -18,6 +18,7 @@ package org.apache.lucene.search;
  */

 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -43,8 +44,7 @@ public class TestTermVectors extends LuceneTestCase {
   protected void setUp() throws Exception {
     super.setUp();
     IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new SimpleAnalyzer(
-        TEST_VERSION_CURRENT)));
+        TEST_VERSION_CURRENT, new MockAnalyzer(MockAnalyzer.SIMPLE, true)));
     //writer.setUseCompoundFile(true);
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
@@ -96,8 +96,7 @@ public class TestTermVectors extends LuceneTestCase {
   public void testTermVectorsFieldOrder() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new SimpleAnalyzer(
-        TEST_VERSION_CURRENT)));
+        TEST_VERSION_CURRENT, new MockAnalyzer(MockAnalyzer.SIMPLE, true)));
     Document doc = new Document();
     doc.add(new Field("c", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     doc.add(new Field("a", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
@@ -237,7 +236,7 @@ public class TestTermVectors extends LuceneTestCase {
     try {
       IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
           TEST_VERSION_CURRENT,
-          new SimpleAnalyzer(TEST_VERSION_CURRENT))
+          new MockAnalyzer(MockAnalyzer.SIMPLE, true))
           .setOpenMode(OpenMode.CREATE));
       writer.addDocument(testDoc1);
       writer.addDocument(testDoc2);
@@ -353,7 +352,7 @@ public class TestTermVectors extends LuceneTestCase {
   // Test only a few docs having vectors
   public void testRareVectors() throws IOException {
     IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT))
+        TEST_VERSION_CURRENT, new MockAnalyzer(MockAnalyzer.SIMPLE, true))
         .setOpenMode(OpenMode.CREATE));
     for (int i = 0; i < 100; i++) {
       Document doc = new Document();
@@ -387,7 +386,7 @@ public class TestTermVectors extends LuceneTestCase {
   public void testMixedVectrosVectors() throws IOException {
     IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
         TEST_VERSION_CURRENT,
-        new SimpleAnalyzer(TEST_VERSION_CURRENT)).setOpenMode(OpenMode.CREATE));
+        new MockAnalyzer(MockAnalyzer.SIMPLE, true)).setOpenMode(OpenMode.CREATE));
     Document doc = new Document();
     doc.add(new Field("field", "one",
         Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));


@@ -19,7 +19,7 @@ package org.apache.lucene.search;
 import java.io.IOException;

-import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
@@ -109,7 +109,7 @@ public class TestTopDocsCollector extends LuceneTestCase {
     // populate an index with 30 documents, this should be enough for the test.
     // The documents have no content - the test uses MatchAllDocsQuery().
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer()));
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     for (int i = 0; i < 30; i++) {
       writer.addDocument(new Document());
     }


@@ -21,7 +21,7 @@ import java.text.DecimalFormat;
 import java.text.NumberFormat;
 import java.util.Random;

-import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
@@ -43,7 +43,7 @@ public class TestWildcardRandom extends LuceneTestCase {
   protected void setUp() throws Exception {
     super.setUp();
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new KeywordAnalyzer(),
+    IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(),
         IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();


@@ -52,7 +52,7 @@ public class PayloadHelper {
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
-      TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
+      TokenStream result = new MockTokenizer(reader, MockAnalyzer.SIMPLE, true);
       result = new PayloadFilter(result, fieldName);
       return result;
     }


@@ -20,7 +20,8 @@ import java.io.Reader;
 import java.util.Collection;

 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
@@ -58,7 +59,7 @@ public class TestPayloadNearQuery extends LuceneTestCase {
   private class PayloadAnalyzer extends Analyzer {
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
-      TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
+      TokenStream result = new MockTokenizer(reader, MockAnalyzer.SIMPLE, true);
       result = new PayloadFilter(result, fieldName);
       return result;
     }


@@ -30,6 +30,8 @@ import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.search.spans.Spans;
 import org.apache.lucene.search.spans.TermSpans;
 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.LowerCaseTokenizer;
 import org.apache.lucene.analysis.TokenFilter;
@@ -67,7 +69,7 @@ public class TestPayloadTermQuery extends LuceneTestCase {
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
-      TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
+      TokenStream result = new MockTokenizer(reader, MockAnalyzer.SIMPLE, true);
       result = new PayloadFilter(result, fieldName);
       return result;
     }


@@ -19,6 +19,7 @@ package org.apache.lucene.search.spans;
 import java.io.IOException;

+import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -57,7 +58,7 @@ public class TestBasics extends LuceneTestCase {
     super.setUp();
     RAMDirectory directory = new RAMDirectory();
     IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
+        TEST_VERSION_CURRENT, new MockAnalyzer(MockAnalyzer.SIMPLE, true)));
     //writer.infoStream = System.out;
     for (int i = 0; i < 1000; i++) {
       Document doc = new Document();


@@ -24,7 +24,8 @@ import java.util.HashSet;
 import java.util.Set;

 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.LowerCaseTokenizer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
@@ -466,7 +467,7 @@ public class TestPayloadSpans extends LuceneTestCase {
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
-      TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
+      TokenStream result = new MockTokenizer(reader, MockAnalyzer.SIMPLE, true);
       result = new PayloadFilter(result, fieldName);
       return result;
     }
@@ -518,7 +519,7 @@ public class TestPayloadSpans extends LuceneTestCase {
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
-      TokenStream result = new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader);
+      TokenStream result = new MockTokenizer(reader, MockAnalyzer.SIMPLE, true);
       result = new PayloadFilter(result, fieldName);
       return result;
     }