commit 8dbd6e2870
parent 91ffd60a16

LUCENE-2248: Change core tests to use a global Version constant

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@908496 13f79535-47bb-0310-9956-ffa450edef68
@@ -233,6 +233,10 @@ Test Cases
 * LUCENE-2207, LUCENE-2219: Improve BaseTokenStreamTestCase to check if
   end() is implemented correctly.  (Koji Sekiguchi, Robert Muir)
 
+* LUCENE-2248, LUCENE-2251: Refactor tests to not use Version.LUCENE_CURRENT,
+  but instead use a global static value from LuceneTestCase(J4), that
+  contains the release version.  (Uwe Schindler, Simon Willnauer)
+
 ======================= Release 3.0.0 2009-11-25 =======================
 
 Changes in backwards compatibility policy
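For context, the pattern this CHANGES entry describes: rather than every test passing Version.LUCENE_CURRENT to analyzers, tokenizers, and query parsers, the shared test base class exposes one static constant that tracks the release under development. A minimal sketch of what that constant could look like in LuceneTestCase — the field value and modifiers are assumptions (trunk was the 3.1 development line at this time); only the constant's name and purpose come from this commit:

    import junit.framework.TestCase;
    import org.apache.lucene.util.Version;

    // Sketch of the shared constant described in the CHANGES entry above.
    // Assumption: it points at the in-development release (3.1 on trunk here)
    // and is bumped once per release instead of at thousands of test sites.
    public abstract class LuceneTestCase extends TestCase {
      /** One global release version for all core tests to reference. */
      public static final Version TEST_VERSION_CURRENT = Version.LUCENE_31;
    }

With this in place, a test extending LuceneTestCase (or LuceneTestCaseJ4) can write new StandardAnalyzer(TEST_VERSION_CURRENT), as the hunks below do, and a release manager updates one field instead of every call site.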
@@ -32,7 +32,6 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 
 /**
  * A very simple demo used in the API documentation (src/java/overview.html).
@@ -44,7 +43,7 @@ public class TestDemo extends LuceneTestCase {
 
   public void testDemo() throws IOException, ParseException {
 
-    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
 
     // Store the index in memory:
     Directory directory = new RAMDirectory();
@@ -62,7 +61,7 @@ public class TestDemo extends LuceneTestCase {
     // Now search the index:
     IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
     // Parse a simple query that searches for "text":
-    QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fieldname", analyzer);
+    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fieldname", analyzer);
     Query query = parser.parse("text");
     ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
     assertEquals(1, hits.length);
@@ -18,7 +18,6 @@ package org.apache.lucene;
  */
 import java.io.IOException;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.Directory;
@@ -96,7 +95,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase {
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);
 
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     MyMergeScheduler ms = new MyMergeScheduler();
     writer.setMergeScheduler(ms);
     writer.setMaxBufferedDocs(2);
@@ -22,7 +22,6 @@ import java.io.PrintWriter;
 import java.io.StringWriter;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
 
@@ -74,7 +73,7 @@ public class TestSearch extends LuceneTestCase {
       throws Exception
     {
       Directory directory = new RAMDirectory();
-      Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+      Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
       IndexWriter writer = new IndexWriter(directory, analyzer, true,
                                            IndexWriter.MaxFieldLength.LIMITED);
 
@@ -108,7 +107,7 @@ public class TestSearch extends LuceneTestCase {
       };
       ScoreDoc[] hits = null;
 
-      QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "contents", analyzer);
+      QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "contents", analyzer);
       parser.setPhraseSlop(4);
       for (int j = 0; j < queries.length; j++) {
         Query query = parser.parse(queries[j]);
@@ -27,8 +27,6 @@ import org.apache.lucene.analysis.*;
 import org.apache.lucene.index.*;
 import org.apache.lucene.search.*;
 import org.apache.lucene.queryParser.*;
-import org.apache.lucene.util.Version;
-
 import org.apache.lucene.util.LuceneTestCase;
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
@@ -79,7 +77,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
 
   private void doTest(PrintWriter out, boolean useCompoundFiles) throws Exception {
       Directory directory = new RAMDirectory();
-      Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+      Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
       IndexWriter writer = new IndexWriter(directory, analyzer, true,
                                            IndexWriter.MaxFieldLength.LIMITED);
 
@@ -98,7 +96,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
       // try a search without OR
       Searcher searcher = new IndexSearcher(directory, true);
 
-      QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, PRIORITY_FIELD, analyzer);
+      QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
 
       Query query = parser.parse(HIGH_PRIORITY);
       out.println("Query: " + query.toString(PRIORITY_FIELD));
@@ -113,7 +111,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
       searcher = new IndexSearcher(directory, true);
       hits = null;
 
-      parser = new QueryParser(Version.LUCENE_CURRENT, PRIORITY_FIELD, analyzer);
+      parser = new QueryParser(TEST_VERSION_CURRENT, PRIORITY_FIELD, analyzer);
 
       query = parser.parse(HIGH_PRIORITY + " OR " + MED_PRIORITY);
       out.println("Query: " + query.toString(PRIORITY_FIELD));
@@ -67,7 +67,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
     Directory dir = new MockRAMDirectory();
 
     SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     // Force frequent flushes
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();
@@ -83,7 +83,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
     writer.close();
     copyFiles(dir, cp);
 
-    writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     copyFiles(dir, cp);
     for(int i=0;i<7;i++) {
       writer.addDocument(doc);
@@ -95,7 +95,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
     writer.close();
     copyFiles(dir, cp);
     dp.release();
-    writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.close();
     try {
       copyFiles(dir, cp);
@@ -111,7 +111,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase
     final long stopTime = System.currentTimeMillis() + 1000;
 
     SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
-    final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED);
 
     // Force frequent flushes
     writer.setMaxBufferedDocs(2);
@@ -18,8 +18,6 @@ package org.apache.lucene.analysis;
  */
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
 import java.io.StringReader;
 import java.util.List;
 import java.util.ArrayList;
@@ -29,7 +27,7 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
 
   // testLain1Accents() is a copy of TestLatin1AccentFilter.testU().
   public void testLatin1Accents() throws Exception {
-    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader
+    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader
       ("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ"
       +" Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij"
      +" ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
@@ -1890,7 +1888,7 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
       expectedOutputTokens.add(expected.toString());
     }
 
-    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(inputText.toString()));
+    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(inputText.toString()));
     ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
     Iterator<String> expectedIter = expectedOutputTokens.iterator();
@@ -26,7 +26,6 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.index.Payload;
-import org.apache.lucene.util.Version;
 
 public class TestAnalyzers extends BaseTokenStreamTestCase {
 
@@ -35,7 +34,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
   }
 
   public void testSimple() throws Exception {
-    Analyzer a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
     assertAnalyzesTo(a, "foo bar FOO BAR",
                      new String[] { "foo", "bar", "foo", "bar" });
     assertAnalyzesTo(a, "foo bar . FOO <> BAR",
@@ -55,7 +54,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
   }
 
   public void testNull() throws Exception {
-    Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
     assertAnalyzesTo(a, "foo bar FOO BAR",
                      new String[] { "foo", "bar", "FOO", "BAR" });
     assertAnalyzesTo(a, "foo bar . FOO <> BAR",
@@ -75,7 +74,7 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
   }
 
   public void testStop() throws Exception {
-    Analyzer a = new StopAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer a = new StopAnalyzer(TEST_VERSION_CURRENT);
     assertAnalyzesTo(a, "foo bar FOO BAR",
                      new String[] { "foo", "bar", "foo", "bar" });
     assertAnalyzesTo(a, "foo a bar such FOO THESE BAR",
@@ -97,11 +96,11 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
   public void testPayloadCopy() throws IOException {
     String s = "how now brown cow";
     TokenStream ts;
-    ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(s));
+    ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(s));
     ts = new PayloadSetter(ts);
     verifyPayload(ts);
 
-    ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(s));
+    ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(s));
     ts = new PayloadSetter(ts);
     verifyPayload(ts);
   }
@@ -122,12 +121,12 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
 
   private static class MyStandardAnalyzer extends StandardAnalyzer {
     public MyStandardAnalyzer() {
-      super(org.apache.lucene.util.Version.LUCENE_CURRENT);
+      super(TEST_VERSION_CURRENT);
     }
 
     @Override
     public TokenStream tokenStream(String field, Reader reader) {
-      return new WhitespaceAnalyzer(Version.LUCENE_CURRENT).tokenStream(field, reader);
+      return new WhitespaceAnalyzer(TEST_VERSION_CURRENT).tokenStream(field, reader);
     }
   }
 
@@ -144,8 +143,8 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
 
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
-      return new LowerCaseFilter(Version.LUCENE_CURRENT,
-          new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader));
+      return new LowerCaseFilter(TEST_VERSION_CURRENT,
+          new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader));
     }
 
   }
@@ -192,9 +191,9 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
   public void testLowerCaseFilterLowSurrogateLeftover() throws IOException {
     // test if the limit of the termbuffer is correctly used with supplementary
    // chars
-    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
         new StringReader("BogustermBogusterm\udc16"));
-    LowerCaseFilter filter = new LowerCaseFilter(Version.LUCENE_CURRENT,
+    LowerCaseFilter filter = new LowerCaseFilter(TEST_VERSION_CURRENT,
         tokenizer);
     assertTokenStreamContents(filter, new String[] {"bogustermbogusterm\udc16"});
     filter.reset();
@@ -31,14 +31,13 @@ import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermPositions;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
 
 public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
   private String[] tokens = new String[] {"term1", "term2", "term3", "term2"};
 
   public void testCaching() throws IOException {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     TokenStream stream = new TokenStream() {
       private int index = 0;
@@ -19,13 +19,12 @@ package org.apache.lucene.analysis;
 
 import java.util.*;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 
 public class TestCharArrayMap extends LuceneTestCase {
   Random r = newRandom();
 
   public void doRandom(int iter, boolean ignoreCase) {
-    CharArrayMap<Integer> map = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, 1, ignoreCase);
+    CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 1, ignoreCase);
     HashMap<String,Integer> hmap = new HashMap<String,Integer>();
 
     char[] key;
@@ -63,7 +62,7 @@ public class TestCharArrayMap extends LuceneTestCase {
   }
 
   public void testMethods() {
-    CharArrayMap<Integer> cm = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, 2, false);
+    CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
     HashMap<String,Integer> hm = new HashMap<String,Integer>();
     hm.put("foo",1);
     hm.put("bar",2);
@@ -131,7 +130,7 @@ public class TestCharArrayMap extends LuceneTestCase {
   }
 
   public void testModifyOnUnmodifiable(){
-    CharArrayMap<Integer> map = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, 2, false);
+    CharArrayMap<Integer> map = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, 2, false);
     map.put("foo",1);
     map.put("bar",2);
     final int size = map.size();
@@ -228,7 +227,7 @@ public class TestCharArrayMap extends LuceneTestCase {
   }
 
   public void testToString() {
-    CharArrayMap<Integer> cm = new CharArrayMap<Integer>(Version.LUCENE_CURRENT, Collections.singletonMap("test",1), false);
+    CharArrayMap<Integer> cm = new CharArrayMap<Integer>(TEST_VERSION_CURRENT, Collections.singletonMap("test",1), false);
     assertEquals("[test]",cm.keySet().toString());
     assertEquals("[1]",cm.values().toString());
     assertEquals("[test=1]",cm.entrySet().toString());
@@ -41,7 +41,7 @@ public class TestCharArraySet extends LuceneTestCase {
 
 
   public void testRehash() throws Exception {
-    CharArraySet cas = new CharArraySet(Version.LUCENE_CURRENT, 0, true);
+    CharArraySet cas = new CharArraySet(TEST_VERSION_CURRENT, 0, true);
     for(int i=0;i<TEST_STOP_WORDS.length;i++)
       cas.add(TEST_STOP_WORDS[i]);
     assertEquals(TEST_STOP_WORDS.length, cas.size());
@@ -52,7 +52,7 @@ public class TestCharArraySet extends LuceneTestCase {
   public void testNonZeroOffset() {
     String[] words={"Hello","World","this","is","a","test"};
     char[] findme="xthisy".toCharArray();
-    CharArraySet set=new CharArraySet(Version.LUCENE_CURRENT, 10,true);
+    CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10,true);
     set.addAll(Arrays.asList(words));
     assertTrue(set.contains(findme, 1, 4));
     assertTrue(set.contains(new String(findme,1,4)));
@@ -64,7 +64,7 @@ public class TestCharArraySet extends LuceneTestCase {
   }
 
   public void testObjectContains() {
-    CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
     Integer val = Integer.valueOf(1);
     set.add(val);
     assertTrue(set.contains(val));
@@ -80,7 +80,7 @@ public class TestCharArraySet extends LuceneTestCase {
   }
 
   public void testClear(){
-    CharArraySet set=new CharArraySet(Version.LUCENE_CURRENT, 10,true);
+    CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10,true);
     set.addAll(Arrays.asList(TEST_STOP_WORDS));
     assertEquals("Not all words added", TEST_STOP_WORDS.length, set.size());
     set.clear();
@@ -94,7 +94,7 @@ public class TestCharArraySet extends LuceneTestCase {
   }
 
   public void testModifyOnUnmodifiable(){
-    CharArraySet set=new CharArraySet(Version.LUCENE_CURRENT, 10, true);
+    CharArraySet set=new CharArraySet(TEST_VERSION_CURRENT, 10, true);
     set.addAll(Arrays.asList(TEST_STOP_WORDS));
     final int size = set.size();
     set = CharArraySet.unmodifiableSet(set);
@@ -150,7 +150,7 @@ public class TestCharArraySet extends LuceneTestCase {
     // current key (now a char[]) on a Set<String> would not hit any element of the CAS and therefor never call
     // remove() on the iterator
     try{
-      set.removeAll(new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));
+      set.removeAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true));
       fail("Modified unmodifiable set");
     }catch (UnsupportedOperationException e) {
       // expected
@@ -158,7 +158,7 @@ public class TestCharArraySet extends LuceneTestCase {
     }
 
     try{
-      set.retainAll(new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(NOT_IN_SET), true));
+      set.retainAll(new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(NOT_IN_SET), true));
       fail("Modified unmodifiable set");
     }catch (UnsupportedOperationException e) {
       // expected
@@ -179,7 +179,7 @@ public class TestCharArraySet extends LuceneTestCase {
   }
 
   public void testUnmodifiableSet(){
-    CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 10,true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 10,true);
     set.addAll(Arrays.asList(TEST_STOP_WORDS));
     set.add(Integer.valueOf(1));
     final int size = set.size();
@@ -209,7 +209,7 @@ public class TestCharArraySet extends LuceneTestCase {
         "\ud801\udc1c\ud801\udc1cCDE", "A\ud801\udc1cB"};
     String[] lowerArr = new String[] {"abc\ud801\udc44",
         "\ud801\udc44\ud801\udc44cde", "a\ud801\udc44b"};
-    CharArraySet set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS), true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), true);
     for (String upper : upperArr) {
       set.add(upper);
     }
@@ -217,7 +217,7 @@ public class TestCharArraySet extends LuceneTestCase {
       assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
       assertTrue(String.format(missing, lowerArr[i]), set.contains(lowerArr[i]));
     }
-    set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS), false);
+    set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS), false);
     for (String upper : upperArr) {
       set.add(upper);
     }
@@ -235,7 +235,7 @@ public class TestCharArraySet extends LuceneTestCase {
 
     String[] lowerArr = new String[] { "abc\uD800", "abc\uD800efg",
         "\uD800efg", "\uD800\ud801\udc44b" };
-    CharArraySet set = new CharArraySet(Version.LUCENE_31, Arrays
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, Arrays
         .asList(TEST_STOP_WORDS), true);
     for (String upper : upperArr) {
       set.add(upper);
@@ -244,7 +244,7 @@ public class TestCharArraySet extends LuceneTestCase {
       assertTrue(String.format(missing, upperArr[i]), set.contains(upperArr[i]));
       assertTrue(String.format(missing, lowerArr[i]), set.contains(lowerArr[i]));
     }
-    set = new CharArraySet(Version.LUCENE_31, Arrays.asList(TEST_STOP_WORDS),
+    set = new CharArraySet(TEST_VERSION_CURRENT, Arrays.asList(TEST_STOP_WORDS),
         false);
     for (String upper : upperArr) {
      set.add(upper);
@@ -328,8 +328,8 @@ public class TestCharArraySet extends LuceneTestCase {
   }
 
   public void testCopyCharArraySetBWCompat() {
-    CharArraySet setIngoreCase = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
-    CharArraySet setCaseSensitive = new CharArraySet(Version.LUCENE_CURRENT, 10, false);
+    CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+    CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
 
     List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
     List<String> stopwordsUpper = new ArrayList<String>();
@@ -375,8 +375,8 @@ public class TestCharArraySet extends LuceneTestCase {
    * Test the static #copy() function with a CharArraySet as a source
    */
   public void testCopyCharArraySet() {
-    CharArraySet setIngoreCase = new CharArraySet(Version.LUCENE_CURRENT, 10, true);
-    CharArraySet setCaseSensitive = new CharArraySet(Version.LUCENE_CURRENT, 10, false);
+    CharArraySet setIngoreCase = new CharArraySet(TEST_VERSION_CURRENT, 10, true);
+    CharArraySet setCaseSensitive = new CharArraySet(TEST_VERSION_CURRENT, 10, false);
 
     List<String> stopwords = Arrays.asList(TEST_STOP_WORDS);
     List<String> stopwordsUpper = new ArrayList<String>();
@@ -388,8 +388,8 @@ public class TestCharArraySet extends LuceneTestCase {
     setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
     setCaseSensitive.add(Integer.valueOf(1));
 
-    CharArraySet copy = CharArraySet.copy(Version.LUCENE_CURRENT, setIngoreCase);
-    CharArraySet copyCaseSens = CharArraySet.copy(Version.LUCENE_CURRENT, setCaseSensitive);
+    CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, setIngoreCase);
+    CharArraySet copyCaseSens = CharArraySet.copy(TEST_VERSION_CURRENT, setCaseSensitive);
 
     assertEquals(setIngoreCase.size(), copy.size());
     assertEquals(setCaseSensitive.size(), copy.size());
@@ -431,7 +431,7 @@ public class TestCharArraySet extends LuceneTestCase {
     }
     set.addAll(Arrays.asList(TEST_STOP_WORDS));
 
-    CharArraySet copy = CharArraySet.copy(Version.LUCENE_CURRENT, set);
+    CharArraySet copy = CharArraySet.copy(TEST_VERSION_CURRENT, set);
 
     assertEquals(set.size(), copy.size());
     assertEquals(set.size(), copy.size());
@@ -461,7 +461,7 @@ public class TestCharArraySet extends LuceneTestCase {
    */
   public void testCopyEmptySet() {
     assertSame(CharArraySet.EMPTY_SET,
-        CharArraySet.copy(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET));
+        CharArraySet.copy(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET));
   }
 
   /**
@@ -483,7 +483,7 @@ public class TestCharArraySet extends LuceneTestCase {
    * Test for NPE
    */
   public void testContainsWithNull() {
-    CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
     try {
       set.contains((char[]) null, 0, 10);
       fail("null value must raise NPE");
@@ -506,7 +506,7 @@ public class TestCharArraySet extends LuceneTestCase {
     assertTrue("in 3.0 version, iterator should be CharArraySetIterator",
         ((Iterator) CharArraySet.copy(Version.LUCENE_30, hset).iterator()) instanceof CharArraySet.CharArraySetIterator);
 
-    CharArraySet set = CharArraySet.copy(Version.LUCENE_CURRENT, hset);
+    CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, hset);
     assertFalse("in current version, iterator should not be CharArraySetIterator",
         ((Iterator) set.iterator()) instanceof CharArraySet.CharArraySetIterator);
 
@@ -525,7 +525,7 @@ public class TestCharArraySet extends LuceneTestCase {
   }
 
   public void testToString() {
-    CharArraySet set = CharArraySet.copy(Version.LUCENE_CURRENT, Collections.singleton("test"));
+    CharArraySet set = CharArraySet.copy(TEST_VERSION_CURRENT, Collections.singleton("test"));
     assertEquals("[test]", set.toString());
     set.add("test2");
     assertTrue(set.toString().contains(", "));
@@ -46,7 +46,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
     // internal buffer size is 1024 make sure we have a surrogate pair right at the border
     builder.insert(1023, "\ud801\udc1c");
     LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
-        Version.LUCENE_CURRENT, new StringReader(builder.toString()));
+        TEST_VERSION_CURRENT, new StringReader(builder.toString()));
     assertTokenStreamContents(tokenizer, builder.toString().toLowerCase().split(" "));
   }
 
@@ -64,7 +64,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
       }
       builder.append("\ud801\udc1cabc");
       LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
-          Version.LUCENE_CURRENT, new StringReader(builder.toString()));
+          TEST_VERSION_CURRENT, new StringReader(builder.toString()));
       assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase()});
     }
   }
@@ -79,7 +79,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
       builder.append("A");
     }
     LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
-        Version.LUCENE_CURRENT, new StringReader(builder.toString() + builder.toString()));
+        TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
     assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
   }
 
@@ -94,13 +94,13 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
     }
     builder.append("\ud801\udc1c");
     LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(
-        Version.LUCENE_CURRENT, new StringReader(builder.toString() + builder.toString()));
+        TEST_VERSION_CURRENT, new StringReader(builder.toString() + builder.toString()));
     assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
   }
 
   public void testLowerCaseTokenizer() throws IOException {
     StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
-    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(Version.LUCENE_CURRENT,
+    LowerCaseTokenizer tokenizer = new LowerCaseTokenizer(TEST_VERSION_CURRENT,
         reader);
     assertTokenStreamContents(tokenizer, new String[] { "tokenizer",
         "\ud801\udc44test" });
@@ -115,7 +115,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
 
   public void testWhitespaceTokenizer() throws IOException {
     StringReader reader = new StringReader("Tokenizer \ud801\udc1ctest");
-    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
         reader);
     assertTokenStreamContents(tokenizer, new String[] { "Tokenizer",
         "\ud801\udc1ctest" });
@@ -132,7 +132,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
   public void testIsTokenCharCharInSubclass() {
     new TestingCharTokenizer(Version.LUCENE_30, new StringReader(""));
     try {
-      new TestingCharTokenizer(Version.LUCENE_CURRENT, new StringReader(""));
+      new TestingCharTokenizer(TEST_VERSION_CURRENT, new StringReader(""));
       fail("version 3.1 is not permitted if char based method is implemented");
     } catch (IllegalArgumentException e) {
       // expected
@@ -142,7 +142,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
   public void testNormalizeCharInSubclass() {
     new TestingCharTokenizerNormalize(Version.LUCENE_30, new StringReader(""));
     try {
-      new TestingCharTokenizerNormalize(Version.LUCENE_CURRENT,
+      new TestingCharTokenizerNormalize(TEST_VERSION_CURRENT,
          new StringReader(""));
       fail("version 3.1 is not permitted if char based method is implemented");
     } catch (IllegalArgumentException e) {
@@ -154,7 +154,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
     new TestingCharTokenizerNormalizeIsTokenChar(Version.LUCENE_30,
         new StringReader(""));
     try {
-      new TestingCharTokenizerNormalizeIsTokenChar(Version.LUCENE_CURRENT,
+      new TestingCharTokenizerNormalizeIsTokenChar(TEST_VERSION_CURRENT,
          new StringReader(""));
      fail("version 3.1 is not permitted if char based method is implemented");
    } catch (IllegalArgumentException e) {
@@ -18,13 +18,11 @@ package org.apache.lucene.analysis;
  */
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
 import java.io.StringReader;
 
 public class TestISOLatin1AccentFilter extends BaseTokenStreamTestCase {
   public void testU() throws Exception {
-    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
+    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
     ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
     assertTermEquals("Des", filter, termAtt);
@@ -31,7 +31,6 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
 
 public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
 
@@ -43,7 +42,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
     super.setUp();
     directory = new RAMDirectory();
     IndexWriter writer = new IndexWriter(directory,
-                                         new SimpleAnalyzer(Version.LUCENE_CURRENT),
+                                         new SimpleAnalyzer(TEST_VERSION_CURRENT),
                                          true, IndexWriter.MaxFieldLength.LIMITED);
 
     Document doc = new Document();
@@ -57,10 +56,10 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
   }
 
   public void testPerFieldAnalyzer() throws Exception {
-    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(Version.LUCENE_CURRENT));
+    PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer(TEST_VERSION_CURRENT));
     analyzer.addAnalyzer("partnum", new KeywordAnalyzer());
 
-    QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, "description", analyzer);
+    QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, "description", analyzer);
     Query query = queryParser.parse("partnum:Q36 AND SPACE");
 
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -7,7 +7,6 @@ import java.util.Set;
 
 import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
 import org.junit.Test;
 
 /**
@@ -34,21 +33,21 @@ public class TestKeywordMarkerTokenFilter extends BaseTokenStreamTestCase {
 
   @Test
   public void testIncrementToken() throws IOException {
-    CharArraySet set = new CharArraySet(Version.LUCENE_31, 5, true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 5, true);
     set.add("lucenefox");
     String[] output = new String[] { "the", "quick", "brown", "LuceneFox",
         "jumps" };
     assertTokenStreamContents(new LowerCaseFilterMock(
-        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
            "The quIck browN LuceneFox Jumps")), set)), output);
    Set<String> jdkSet = new HashSet<String>();
    jdkSet.add("LuceneFox");
    assertTokenStreamContents(new LowerCaseFilterMock(
-        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
            "The quIck browN LuceneFox Jumps")), jdkSet)), output);
    Set<?> set2 = set;
    assertTokenStreamContents(new LowerCaseFilterMock(
-        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+        new KeywordMarkerTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
            "The quIck browN LuceneFox Jumps")), set2)), output);
  }
 
@@ -18,14 +18,12 @@ package org.apache.lucene.analysis;
  */
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
-
 import java.io.StringReader;
 
 public class TestLengthFilter extends BaseTokenStreamTestCase {
 
   public void testFilter() throws Exception {
-    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+    TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
         new StringReader("short toolong evenmuchlongertext a ab toolong foo"));
     LengthFilter filter = new LengthFilter(stream, 2, 6);
     TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
@@ -19,8 +19,6 @@ package org.apache.lucene.analysis;
 
 import java.io.StringReader;
 
-import org.apache.lucene.util.Version;
-
 public class TestMappingCharFilter extends BaseTokenStreamTestCase {
 
   NormalizeCharMap normMap;
@@ -60,55 +58,55 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
 
   public void testNothingChange() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "x" ) );
-    TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"x"}, new int[]{0}, new int[]{1});
   }
 
   public void test1to1() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "h" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"i"}, new int[]{0}, new int[]{1});
   }
 
   public void test1to2() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "j" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"jj"}, new int[]{0}, new int[]{1});
   }
 
   public void test1to3() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "k" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"kkk"}, new int[]{0}, new int[]{1});
   }
 
   public void test2to4() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "ll" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"llll"}, new int[]{0}, new int[]{2});
   }
 
   public void test2to1() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "aa" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"a"}, new int[]{0}, new int[]{2});
   }
 
   public void test3to1() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "bbb" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"b"}, new int[]{0}, new int[]{3});
   }
 
   public void test4to2() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "cccc" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[]{"cc"}, new int[]{0}, new int[]{4});
   }
 
   public void test5to0() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, new StringReader( "empty" ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts, new String[0]);
   }
 
@@ -132,7 +130,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
   //
   public void testTokenStream() throws Exception {
     CharStream cs = new MappingCharFilter( normMap, CharReader.get( new StringReader( "h i j k ll cccc bbb aa" ) ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts,
       new String[]{"i","i","jj","kkk","llll","cc","b","a"},
       new int[]{0,2,4,6,8,11,16,20},
@@ -153,7 +151,7 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
   public void testChained() throws Exception {
     CharStream cs = new MappingCharFilter( normMap,
         new MappingCharFilter( normMap, CharReader.get( new StringReader( "aaaa ll h" ) ) ) );
-    TokenStream ts = new WhitespaceTokenizer( Version.LUCENE_CURRENT, cs );
+    TokenStream ts = new WhitespaceTokenizer( TEST_VERSION_CURRENT, cs );
     assertTokenStreamContents(ts,
       new String[]{"a","llllllll","i"},
       new int[]{0,5,8},
@@ -3,7 +3,6 @@ package org.apache.lucene.analysis;
 import java.io.StringReader;
 
 import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -26,8 +25,8 @@ public class TestPerFieldAnalzyerWrapper extends BaseTokenStreamTestCase {
   public void testPerField() throws Exception {
     String text = "Qwerty";
     PerFieldAnalyzerWrapper analyzer =
-              new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
-    analyzer.addAnalyzer("special", new SimpleAnalyzer(Version.LUCENE_CURRENT));
+              new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
+    analyzer.addAnalyzer("special", new SimpleAnalyzer(TEST_VERSION_CURRENT));
 
     TokenStream tokenStream = analyzer.tokenStream("field",
         new StringReader(text));
@@ -25,8 +25,6 @@ import java.io.InputStreamReader;
 import java.io.StringReader;
 import java.util.zip.ZipFile;
 
-import org.apache.lucene.util.Version;
-
 /**
  * Test the PorterStemFilter with Martin Porter's test data.
  */
@@ -60,9 +58,9 @@ public class TestPorterStemFilter extends BaseTokenStreamTestCase {
   }
 
   public void testWithKeywordAttribute() throws IOException {
-    CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+    CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
     set.add("yourselves");
-    Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("yourselves yours"));
+    Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("yourselves yours"));
     TokenStream filter = new PorterStemFilter(new KeywordMarkerTokenFilter(tokenizer, set));
     assertTokenStreamContents(filter, new String[] {"yourselves", "your"});
   }
@@ -23,16 +23,16 @@ import org.apache.lucene.util.Version;
 
 public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
 
-  private Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+  private Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
 
   public void testMaxTermLength() throws Exception {
-    StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    StandardAnalyzer sa = new StandardAnalyzer(TEST_VERSION_CURRENT);
     sa.setMaxTokenLength(5);
     assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "xy", "z"});
   }
 
   public void testMaxTermLength2() throws Exception {
-    StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    StandardAnalyzer sa = new StandardAnalyzer(TEST_VERSION_CURRENT);
     assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "toolong", "xy", "z"});
     sa.setMaxTokenLength(5);
 
@@ -96,7 +96,7 @@ public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
 
   public void testLucene1140() throws Exception {
     try {
-      StandardAnalyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+      StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
       assertAnalyzesTo(analyzer, "www.nutch.org.", new String[]{ "www.nutch.org" }, new String[] { "<HOST>" });
     } catch (NullPointerException e) {
       fail("Should not throw an NPE and it did");
@@ -106,7 +106,7 @@ public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
 
   public void testDomainNames() throws Exception {
     // Current lucene should not show the bug
-    StandardAnalyzer a2 = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    StandardAnalyzer a2 = new StandardAnalyzer(TEST_VERSION_CURRENT);
 
     // domain names
     assertAnalyzesTo(a2, "www.nutch.org", new String[]{"www.nutch.org"});
@@ -29,7 +29,7 @@ import java.util.HashSet;
 
 public class TestStopAnalyzer extends BaseTokenStreamTestCase {
 
-  private StopAnalyzer stop = new StopAnalyzer(Version.LUCENE_CURRENT);
+  private StopAnalyzer stop = new StopAnalyzer(TEST_VERSION_CURRENT);
   private Set<Object> inValidTokens = new HashSet<Object>();
 
   public TestStopAnalyzer(String s) {
@@ -82,7 +82,7 @@ public class TestStopAnalyzer extends BaseTokenStreamTestCase {
     stopWordsSet.add("good");
     stopWordsSet.add("test");
     stopWordsSet.add("analyzer");
-    StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_CURRENT, stopWordsSet);
+    StopAnalyzer newStop = new StopAnalyzer(TEST_VERSION_CURRENT, stopWordsSet);
     StringReader reader = new StringReader("This is a good test of the english stop analyzer with positions");
     int expectedIncr[] = { 1, 1, 1, 3, 1, 1, 1, 2, 1};
     TokenStream stream = newStop.tokenStream("test", reader);
@@ -38,7 +38,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
   public void testExactCase() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
     Set<String> stopWords = new HashSet<String>(Arrays.asList("is", "the", "Time"));
-    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopWords, false);
+    TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopWords, false);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
     assertEquals("Now", termAtt.term());
@@ -50,7 +50,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
   public void testIgnoreCase() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
     Set<Object> stopWords = new HashSet<Object>(Arrays.asList( "is", "the", "Time" ));
-    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopWords, true);
+    TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopWords, true);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
     assertEquals("Now", termAtt.term());
@@ -60,8 +60,8 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
   public void testStopFilt() throws IOException {
     StringReader reader = new StringReader("Now is The Time");
     String[] stopWords = new String[] { "is", "the", "Time" };
-    Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
-    TokenStream stream = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+    Set<Object> stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords);
+    TokenStream stream = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
     final TermAttribute termAtt = stream.getAttribute(TermAttribute.class);
     assertTrue(stream.incrementToken());
     assertEquals("Now", termAtt.term());
@@ -84,14 +84,14 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
     log(sb.toString());
     String stopWords[] = a.toArray(new String[0]);
     for (int i=0; i<a.size(); i++) log("Stop: "+stopWords[i]);
-    Set<Object> stopSet = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords);
+    Set<Object> stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords);
     // with increments
     StringReader reader = new StringReader(sb.toString());
-    StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+    StopFilter stpf = new StopFilter(Version.LUCENE_24, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
     doTestStopPositons(stpf,true);
     // without increments
     reader = new StringReader(sb.toString());
-    stpf = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet);
+    stpf = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet);
     doTestStopPositons(stpf,false);
     // with increments, concatenating two stop filters
     ArrayList<String> a0 = new ArrayList<String>();
@@ -107,12 +107,12 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
     for (int i=0; i<a0.size(); i++) log("Stop0: "+stopWords0[i]);
     String stopWords1[] = a1.toArray(new String[0]);
     for (int i=0; i<a1.size(); i++) log("Stop1: "+stopWords1[i]);
-    Set<Object> stopSet0 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords0);
-    Set<Object> stopSet1 = StopFilter.makeStopSet(Version.LUCENE_CURRENT, stopWords1);
+    Set<Object> stopSet0 = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords0);
+    Set<Object> stopSet1 = StopFilter.makeStopSet(TEST_VERSION_CURRENT, stopWords1);
     reader = new StringReader(sb.toString());
-    StopFilter stpf0 = new StopFilter(Version.LUCENE_CURRENT, new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader), stopSet0); // first part of the set
+    StopFilter stpf0 = new StopFilter(TEST_VERSION_CURRENT, new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader), stopSet0); // first part of the set
     stpf0.setEnablePositionIncrements(true);
-    StopFilter stpf01 = new StopFilter(Version.LUCENE_CURRENT, stpf0, stopSet1); // two stop filters concatenated!
+    StopFilter stpf01 = new StopFilter(TEST_VERSION_CURRENT, stpf0, stopSet1); // two stop filters concatenated!
     doTestStopPositons(stpf01,true);
   }
 
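Note that the first StopFilter in the hunk above deliberately keeps its pinned Version.LUCENE_24 argument: that test exercises behavior tied to a fixed release, so only the nested tokenizer moves to the shared constant. A sketch of the resulting convention — the test class and stop words here are hypothetical, not part of this commit:

    import java.io.StringReader;
    import java.util.Set;
    import org.apache.lucene.analysis.StopFilter;
    import org.apache.lucene.analysis.WhitespaceTokenizer;
    import org.apache.lucene.util.LuceneTestCase;
    import org.apache.lucene.util.Version;

    // Hypothetical test illustrating the convention after this commit:
    // deliberately pinned versions stay as literals, while everything that
    // meant "whatever version ships next" uses the shared constant.
    public class StopFilterVersionSketch extends LuceneTestCase {
      public void testPinnedVersusCurrent() throws Exception {
        Set<Object> stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, "is", "the");
        // Pinned: the behavior under test is tied to release 2.4 and must not
        // float when the global test version is bumped.
        StopFilter legacy = new StopFilter(Version.LUCENE_24,
            new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("now is the time")),
            stopSet);
        assertNotNull(legacy);
      }
    }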
@ -22,8 +22,6 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
|||
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
|
||||
import org.apache.lucene.util.AttributeSource;
|
||||
import org.apache.lucene.util.English;
|
||||
import org.apache.lucene.util.Version;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
|
||||
|
@ -76,7 +74,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
|
|||
|
||||
|
||||
public void testGeneral() throws IOException {
|
||||
final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer1.toString())));
|
||||
final TeeSinkTokenFilter source = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer1.toString())));
|
||||
final TokenStream sink1 = source.newSinkTokenStream();
|
||||
final TokenStream sink2 = source.newSinkTokenStream(theFilter);
|
||||
|
||||
|
@ -90,7 +88,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
|
|||
}
|
||||
|
||||
public void testMultipleSources() throws Exception {
|
||||
final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer1.toString())));
|
||||
+final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer1.toString())));
final TeeSinkTokenFilter.SinkTokenStream dogDetector = tee1.newSinkTokenStream(dogFilter);
final TeeSinkTokenFilter.SinkTokenStream theDetector = tee1.newSinkTokenStream(theFilter);
final TokenStream source1 = new CachingTokenFilter(tee1);

@@ -99,7 +97,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
dogDetector.addAttribute(CheckClearAttributesAttribute.class);
theDetector.addAttribute(CheckClearAttributesAttribute.class);

-final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer2.toString())));
+final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer2.toString())));
tee2.addSinkTokenStream(dogDetector);
tee2.addSinkTokenStream(theDetector);
final TokenStream source2 = tee2;

@@ -111,7 +109,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
assertTokenStreamContents(dogDetector, new String[]{"Dogs", "Dogs"});

source1.reset();
-TokenStream lowerCasing = new LowerCaseFilter(Version.LUCENE_CURRENT, source1);
+TokenStream lowerCasing = new LowerCaseFilter(TEST_VERSION_CURRENT, source1);
String[] lowerCaseTokens = new String[tokens1.length];
for (int i = 0; i < tokens1.length; i++)
lowerCaseTokens[i] = tokens1[i].toLowerCase();

@@ -133,10 +131,10 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
buffer.append(English.intToEnglish(i).toUpperCase()).append(' ');
}
//make sure we produce the same tokens
-TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))));
+TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))));
TokenStream sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(100));
teeStream.consumeAllTokens();
-TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))), 100);
+TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), 100);
TermAttribute tfTok = stream.addAttribute(TermAttribute.class);
TermAttribute sinkTok = sink.addAttribute(TermAttribute.class);
for (int i=0; stream.incrementToken(); i++) {

@@ -149,12 +147,12 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
int tfPos = 0;
long start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
-stream = new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString())));
+stream = new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString())));
PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
while (stream.incrementToken()) {
tfPos += posIncrAtt.getPositionIncrement();
}
-stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))), modCounts[j]);
+stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))), modCounts[j]);
posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class);
while (stream.incrementToken()) {
tfPos += posIncrAtt.getPositionIncrement();

@@ -166,7 +164,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
//simulate one field with one sink
start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
-teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))));
+teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(buffer.toString()))));
sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(modCounts[j]));
PositionIncrementAttribute posIncrAtt = teeStream.getAttribute(PositionIncrementAttribute.class);
while (teeStream.incrementToken()) {
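The TestTeeSinkTokenFilter hunks above show the shape of the whole change: every tokenizer, filter, and analyzer that used to be constructed with Version.LUCENE_CURRENT now receives the TEST_VERSION_CURRENT constant the test inherits from its base class. As a minimal sketch of that pattern (the class name and input text here are made up for illustration, not part of the commit):

    // Hypothetical test, sketching the tee/sink wiring above with the
    // inherited TEST_VERSION_CURRENT constant in place of Version.LUCENE_CURRENT.
    import java.io.StringReader;
    import org.apache.lucene.analysis.BaseTokenStreamTestCase;
    import org.apache.lucene.analysis.TeeSinkTokenFilter;
    import org.apache.lucene.analysis.WhitespaceTokenizer;

    public class TeeSinkVersionSketch extends BaseTokenStreamTestCase {
      public void testSink() throws Exception {
        TeeSinkTokenFilter tee = new TeeSinkTokenFilter(
            new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Dogs bark")));
        // a sink created with no filter receives every token the tee produces
        TeeSinkTokenFilter.SinkTokenStream all = tee.newSinkTokenStream();
        tee.consumeAllTokens(); // drive the source; the sink fills as a side effect
        assertTokenStreamContents(all, new String[]{"Dogs", "bark"});
      }
    }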
@@ -18,7 +18,6 @@ package org.apache.lucene.collation;
*/


-import junit.framework.TestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.WhitespaceAnalyzer;

@@ -38,14 +37,14 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Document;
import org.apache.lucene.util.IndexableBinaryStringTools;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;

import java.io.IOException;
import java.nio.CharBuffer;
import java.nio.ByteBuffer;


-public class CollationTestBase extends TestCase {
+public class CollationTestBase extends LuceneTestCase {

protected String firstRangeBeginningOriginal = "\u062F";
protected String firstRangeEndOriginal = "\u0698";

@@ -179,7 +178,7 @@ public class CollationTestBase extends TestCase {
String usResult) throws Exception {
RAMDirectory indexStore = new RAMDirectory();
PerFieldAnalyzerWrapper analyzer
-= new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+= new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.addAnalyzer("US", usAnalyzer);
analyzer.addAnalyzer("France", franceAnalyzer);
analyzer.addAnalyzer("Sweden", swedenAnalyzer);
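CollationTestBase is the one place the commit changes more than a constructor argument: its base class moves from junit.framework.TestCase to LuceneTestCase, which is what brings TEST_VERSION_CURRENT into scope for the analyzer setup in the last hunk. A sketch of the resulting shape (class and method names hypothetical):

    import org.apache.lucene.analysis.PerFieldAnalyzerWrapper;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.util.LuceneTestCase;

    public class CollationSketch extends LuceneTestCase {
      public void testWrapperUsesTestVersion() {
        // extending LuceneTestCase (not TestCase) makes the constant visible
        PerFieldAnalyzerWrapper analyzer =
            new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
        assertNotNull(analyzer);
      }
    }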
@@ -59,7 +59,7 @@ public class TestBinaryDocument extends LuceneTestCase

/** add the doc to a ram index */
MockRAMDirectory dir = new MockRAMDirectory();
-IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.close();

@@ -97,7 +97,7 @@ public class TestBinaryDocument extends LuceneTestCase

/** add the doc to a ram index */
MockRAMDirectory dir = new MockRAMDirectory();
-IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.close();
@@ -154,7 +154,7 @@ public class TestDocument extends LuceneTestCase
public void testGetValuesForIndexedDocument() throws Exception
{
RAMDirectory dir = new RAMDirectory();
-IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(makeDocumentWithFields());
writer.close();

@@ -225,7 +225,7 @@ public class TestDocument extends LuceneTestCase
doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED));

RAMDirectory dir = new RAMDirectory();
-IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
field.setValue("id2");
writer.addDocument(doc);
@@ -29,7 +29,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Version;
+import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;

class DocHelper {
public static final String FIELD_1_TEXT = "field one text";

@@ -219,7 +218,7 @@ class DocHelper {
*/
public static SegmentInfo writeDoc(Directory dir, Document doc) throws IOException
{
-return writeDoc(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), Similarity.getDefault(), doc);
+return writeDoc(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), Similarity.getDefault(), doc);
}

/**
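DocHelper is a helper class rather than a test, so it cannot inherit the constant; the first hunk above instead pulls it in with a static import from LuceneTestCaseJ4. The same trick works for any non-test utility, sketched here with a hypothetical class name:

    import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;

    class IndexTestHelper {
      static Analyzer defaultAnalyzer() {
        // same constant the test classes inherit, reached via static import
        return new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
      }
    }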
@@ -20,8 +20,6 @@ package org.apache.lucene.index;
import java.io.IOException;

import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

@@ -429,7 +427,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {

private IndexWriter newWriter(Directory dir, boolean create)
throws IOException {
-final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED);
+final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), create, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMergePolicy(new LogDocMergePolicy(writer));
return writer;
}

@@ -503,7 +501,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
public void testHangOnClose() throws IOException {

Directory dir = new MockRAMDirectory();
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergePolicy(new LogByteSizeMergePolicy(writer));
writer.setMaxBufferedDocs(5);
writer.setUseCompoundFile(false);

@@ -529,7 +527,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
writer.close();

Directory dir2 = new MockRAMDirectory();
-writer = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+writer = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer);
lmp.setMinMergeMB(0.0001);
writer.setMergePolicy(lmp);
@@ -26,7 +26,7 @@ import java.io.File;
import java.io.IOException;

public class TestAtomicUpdate extends LuceneTestCase {
-private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
private Random RANDOM;

public class MockIndexWriter extends IndexWriter {
@@ -45,7 +45,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;

/*

@@ -218,7 +217,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
hasTested29++;
}

-IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
w.optimize();
w.close();

@@ -273,7 +272,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
}

public void searchIndex(String dirName, String oldName) throws IOException {
-//QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+//QueryParser parser = new QueryParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
//Query query = parser.parse("handle:1");

dirName = fullDir(dirName);

@@ -358,7 +357,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
Directory dir = FSDirectory.open(new File(dirName));

// open writer
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);

// add 10 docs
for(int i=0;i<10;i++) {

@@ -402,7 +401,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
searcher.close();

// optimize
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
writer.optimize();
writer.close();

@@ -452,7 +451,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
searcher.close();

// optimize
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
writer.optimize();
writer.close();

@@ -474,7 +473,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
dirName = fullDir(dirName);

Directory dir = FSDirectory.open(new File(dirName));
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(doCFS);
writer.setMaxBufferedDocs(10);

@@ -485,7 +484,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
writer.close();

// open fresh writer so we get no prx file in the added segment
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(doCFS);
writer.setMaxBufferedDocs(10);
addNoProxDoc(writer);

@@ -512,7 +511,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase
try {
Directory dir = FSDirectory.open(new File(fullDir(outputDir)));

-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setRAMBufferSizeMB(16.0);
for(int i=0;i<35;i++) {
addDoc(writer, i);
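The TestBackwardsCompatibility hunks are typical of the IndexWriter changes throughout the commit: only the analyzer's version argument moves to the shared constant, while the create flag, MaxFieldLength, and everything else stay untouched. A sketch of the recurring before/after (the factory class and names are illustrative only):

    import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;

    import java.io.IOException;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;

    class WriterFactorySketch {
      static IndexWriter open(Directory dir, boolean create) throws IOException {
        // was: new WhitespaceAnalyzer(Version.LUCENE_CURRENT)
        return new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
            create, IndexWriter.MaxFieldLength.UNLIMITED);
      }
    }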
@@ -24,7 +24,6 @@ import java.util.List;
import java.util.ArrayList;

import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;

@@ -35,7 +34,7 @@ public class TestCheckIndex extends LuceneTestCase {

public void testDeletedDocs() throws IOException {
MockRAMDirectory dir = new MockRAMDirectory();
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
@@ -25,13 +25,11 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import java.io.IOException;

public class TestConcurrentMergeScheduler extends LuceneTestCase {

-private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);

private static class FailOnlyOnFlush extends MockRAMDirectory.Failure {
boolean doFail;
@@ -20,7 +20,6 @@ package org.apache.lucene.index;
import java.io.IOException;

import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.NoLockFactory;

@@ -36,7 +35,7 @@ public class TestCrash extends LuceneTestCase {
private IndexWriter initIndex(MockRAMDirectory dir) throws IOException {
dir.setLockFactory(NoLockFactory.getNoLockFactory());

-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
//writer.setMaxBufferedDocs(2);
writer.setMaxBufferedDocs(10);
((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
@@ -34,7 +34,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;

/*
Verify we can read the pre-2.1 file format, do searches

@@ -202,7 +201,7 @@ public class TestDeletionPolicy extends LuceneTestCase

Directory dir = new RAMDirectory();
ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
writer.close();

@@ -211,7 +210,7 @@ public class TestDeletionPolicy extends LuceneTestCase
// Record last time when writer performed deletes of
// past commits
lastDeleteTime = System.currentTimeMillis();
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
for(int j=0;j<17;j++) {
addDoc(writer);

@@ -272,7 +271,7 @@ public class TestDeletionPolicy extends LuceneTestCase
Directory dir = new RAMDirectory();
policy.dir = dir;

-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
writer.setMergeScheduler(new SerialMergeScheduler());

@@ -281,7 +280,7 @@ public class TestDeletionPolicy extends LuceneTestCase
}
writer.close();

-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
writer.optimize();
writer.close();

@@ -319,7 +318,7 @@ public class TestDeletionPolicy extends LuceneTestCase
// Open & close a writer and assert that it
// actually removed something:
int preCount = dir.listAll().length;
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.LIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.LIMITED);
writer.close();
int postCount = dir.listAll().length;
assertTrue(postCount < preCount);

@@ -341,7 +340,7 @@ public class TestDeletionPolicy extends LuceneTestCase
Directory dir = new MockRAMDirectory();
policy.dir = dir;

-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
for(int i=0;i<10;i++) {
addDoc(writer);

@@ -360,7 +359,7 @@ public class TestDeletionPolicy extends LuceneTestCase
assertTrue(lastCommit != null);

// Now add 1 doc and optimize
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
addDoc(writer);
assertEquals(11, writer.numDocs());
writer.optimize();

@@ -369,7 +368,7 @@ public class TestDeletionPolicy extends LuceneTestCase
assertEquals(7, IndexReader.listCommits(dir).size());

// Now open writer on the commit just before optimize:
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
assertEquals(10, writer.numDocs());

// Should undo our rollback:

@@ -381,7 +380,7 @@ public class TestDeletionPolicy extends LuceneTestCase
assertEquals(11, r.numDocs());
r.close();

-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
assertEquals(10, writer.numDocs());
// Commits the rollback:
writer.close();

@@ -397,7 +396,7 @@ public class TestDeletionPolicy extends LuceneTestCase
r.close();

// Reoptimize
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), policy, IndexWriter.MaxFieldLength.LIMITED);
writer.optimize();
writer.close();

@@ -408,7 +407,7 @@ public class TestDeletionPolicy extends LuceneTestCase

// Now open writer on the commit just before optimize,
// but this time keeping only the last commit:
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit);
assertEquals(10, writer.numDocs());

// Reader still sees optimized index, because writer

@@ -444,7 +443,7 @@ public class TestDeletionPolicy extends LuceneTestCase

Directory dir = new RAMDirectory();

-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
for(int i=0;i<107;i++) {

@@ -452,7 +451,7 @@ public class TestDeletionPolicy extends LuceneTestCase
}
writer.close();

-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
writer.optimize();
writer.close();

@@ -487,7 +486,7 @@ public class TestDeletionPolicy extends LuceneTestCase
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

for(int j=0;j<N+1;j++) {
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
for(int i=0;i<17;i++) {

@@ -542,14 +541,14 @@ public class TestDeletionPolicy extends LuceneTestCase
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

Directory dir = new RAMDirectory();
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
writer.close();
Term searchTerm = new Term("content", "aaa");
Query query = new TermQuery(searchTerm);

for(int i=0;i<N+1;i++) {
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
for(int j=0;j<17;j++) {
addDoc(writer);

@@ -566,7 +565,7 @@ public class TestDeletionPolicy extends LuceneTestCase
reader.close();
searcher.close();
}
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setUseCompoundFile(useCompoundFile);
writer.optimize();
// this is a commit

@@ -637,7 +636,7 @@ public class TestDeletionPolicy extends LuceneTestCase
KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

Directory dir = new RAMDirectory();
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
writer.close();

@@ -646,7 +645,7 @@ public class TestDeletionPolicy extends LuceneTestCase

for(int i=0;i<N+1;i++) {

-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
writer.setMaxBufferedDocs(10);
writer.setUseCompoundFile(useCompoundFile);
for(int j=0;j<17;j++) {

@@ -664,7 +663,7 @@ public class TestDeletionPolicy extends LuceneTestCase
reader.close();
searcher.close();

-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
// This will not commit: there are no changes
// pending because we opened for "create":
writer.close();
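Several TestDeletionPolicy hunks use the IndexWriter overload that reopens on a recorded IndexCommit to roll back past an optimize; here too only the analyzer argument changes. A sketch of that overload (the class, method, and parameter names are illustrative):

    import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;

    import java.io.IOException;
    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.index.IndexCommit;
    import org.apache.lucene.index.IndexDeletionPolicy;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;

    class RollbackSketch {
      static IndexWriter reopenAt(Directory dir, IndexDeletionPolicy policy,
          IndexCommit lastCommit) throws IOException {
        // opening on lastCommit makes the writer see the index as of that commit
        return new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
            policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit);
      }
    }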
@@ -194,7 +194,7 @@ public class TestDirectoryReader extends LuceneTestCase {
}

private void addDoc(RAMDirectory ramDir1, String s, boolean create) throws IOException {
-IndexWriter iw = new IndexWriter(ramDir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), create, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter iw = new IndexWriter(ramDir1, new StandardAnalyzer(TEST_VERSION_CURRENT), create, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
iw.addDocument(doc);
@@ -35,7 +35,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;


/** JUnit adaptation of an older test case DocTest. */

@@ -110,7 +109,7 @@ public class TestDoc extends LuceneTestCase {
PrintWriter out = new PrintWriter(sw, true);

Directory directory = FSDirectory.open(indexDir);
-IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);

SegmentInfo si1 = indexDoc(writer, "test.txt");
printSegment(out, si1);

@@ -138,7 +137,7 @@ public class TestDoc extends LuceneTestCase {
out = new PrintWriter(sw, true);

directory = FSDirectory.open(indexDir);
-writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);

si1 = indexDoc(writer, "test.txt");
printSegment(out, si1);
@@ -39,7 +39,6 @@ import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;

public class TestDocumentWriter extends LuceneTestCase {

@@ -62,7 +61,7 @@ public class TestDocumentWriter extends LuceneTestCase {
public void testAddDocument() throws Exception {
Document testDoc = new Document();
DocHelper.setupDoc(testDoc);
-Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(testDoc);
writer.commit();

@@ -111,7 +110,7 @@ public class TestDocumentWriter extends LuceneTestCase {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
-return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
}

@Override

@@ -144,7 +143,7 @@ public class TestDocumentWriter extends LuceneTestCase {
Analyzer analyzer = new Analyzer() {
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
-return new TokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader)) {
+return new TokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader)) {
boolean first=true;
AttributeSource.State state;

@@ -208,7 +207,7 @@ public class TestDocumentWriter extends LuceneTestCase {


public void testPreAnalyzedField() throws IOException {
-IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();

doc.add(new Field("preanalyzed", new TokenStream() {

@@ -267,7 +266,7 @@ public class TestDocumentWriter extends LuceneTestCase {
doc.add(new Field("f2", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("f2", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.NO));

-IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.close();

@@ -300,7 +299,7 @@ public class TestDocumentWriter extends LuceneTestCase {
doc.add(f);
doc.add(new Field("f2", "v2", Store.YES, Index.NO));

-IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.addDocument(doc);
writer.optimize(); // be sure to have a single segment
writer.close();
@@ -18,7 +18,6 @@ package org.apache.lucene.index;
*/

import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.store.FSDirectory;

@@ -51,7 +50,7 @@ public class TestFieldsReader extends LuceneTestCase {
fieldInfos = new FieldInfos();
DocHelper.setupDoc(testDoc);
fieldInfos.add(testDoc);
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
writer.addDocument(testDoc);
writer.close();

@@ -212,7 +211,7 @@ public class TestFieldsReader extends LuceneTestCase {
FSDirectory tmpDir = FSDirectory.open(file);
assertTrue(tmpDir != null);

-IndexWriter writer = new IndexWriter(tmpDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(tmpDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setUseCompoundFile(false);
writer.addDocument(testDoc);
writer.close();

@@ -393,7 +392,7 @@ public class TestFieldsReader extends LuceneTestCase {

try {
Directory dir = new FaultyFSDirectory(indexDir);
-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
for(int i=0;i<2;i++)
writer.addDocument(testDoc);
writer.optimize();
@@ -19,8 +19,6 @@ package org.apache.lucene.index;
*/

import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import junit.framework.TestSuite;
import junit.textui.TestRunner;

@@ -99,7 +97,7 @@ public class TestFilterIndexReader extends LuceneTestCase {
*/
public void testFilterIndexReader() throws Exception {
RAMDirectory directory = new MockRAMDirectory();
-IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);

Document d1 = new Document();
@@ -18,8 +18,6 @@ package org.apache.lucene.index;
*/

import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;

@@ -41,7 +39,7 @@ public class TestIndexFileDeleter extends LuceneTestCase

Directory dir = new RAMDirectory();

-IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(10);
int i;
for(i=0;i<35;i++) {

@@ -146,7 +144,7 @@ public class TestIndexFileDeleter extends LuceneTestCase

// Open & close a writer: it should delete the above 4
// files and nothing more:
-writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
writer.close();

String[] files2 = dir.listAll();
@ -54,7 +54,6 @@ import org.apache.lucene.store.MockRAMDirectory;
|
|||
import org.apache.lucene.store.NoSuchDirectoryException;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
public class TestIndexReader extends LuceneTestCase
|
||||
|
@ -79,7 +78,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
commitUserData.put("foo", "fighters");
|
||||
|
||||
// set up writer
|
||||
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer.setMaxBufferedDocs(2);
|
||||
for(int i=0;i<27;i++)
|
||||
addDocumentWithFields(writer);
|
||||
|
@ -101,7 +100,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
assertTrue(c.equals(r.getIndexCommit()));
|
||||
|
||||
// Change the index
|
||||
writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer.setMaxBufferedDocs(2);
|
||||
for(int i=0;i<7;i++)
|
||||
addDocumentWithFields(writer);
|
||||
|
@ -112,7 +111,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
assertFalse(r2.getIndexCommit().isOptimized());
|
||||
r3.close();
|
||||
|
||||
writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer.optimize();
|
||||
writer.close();
|
||||
|
||||
|
@ -126,19 +125,19 @@ public class TestIndexReader extends LuceneTestCase
|
|||
public void testIsCurrent() throws Exception
|
||||
{
|
||||
RAMDirectory d = new MockRAMDirectory();
|
||||
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
// set up reader:
|
||||
IndexReader reader = IndexReader.open(d, false);
|
||||
assertTrue(reader.isCurrent());
|
||||
// modify index by adding another document:
|
||||
writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
assertFalse(reader.isCurrent());
|
||||
// re-create index:
|
||||
writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
assertFalse(reader.isCurrent());
|
||||
|
@ -154,7 +153,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
{
|
||||
RAMDirectory d = new MockRAMDirectory();
|
||||
// set up writer
|
||||
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
// set up reader
|
||||
|
@ -166,7 +165,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
assertTrue(fieldNames.contains("unstored"));
|
||||
reader.close();
|
||||
// add more documents
|
||||
writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
// want to get some more segments here
|
||||
for (int i = 0; i < 5*writer.getMergeFactor(); i++)
|
||||
{
|
||||
|
@ -246,7 +245,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
public void testTermVectors() throws Exception {
|
||||
RAMDirectory d = new MockRAMDirectory();
|
||||
// set up writer
|
||||
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
// want to get some more segments here
|
||||
// new termvector fields
|
||||
for (int i = 0; i < 5 * writer.getMergeFactor(); i++) {
|
||||
|
@ -314,7 +313,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
for (int i = 0; i < 100; i++)
|
||||
{
|
||||
addDoc(writer, searchTerm.text());
|
||||
|
@ -356,7 +355,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
Directory dir = new RAMDirectory();
|
||||
byte[] bin = new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
|
||||
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
|
||||
for (int i = 0; i < 10; i++) {
|
||||
addDoc(writer, "document number " + (i + 1));
|
||||
|
@ -365,7 +364,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
addDocumentWithTermVectorFields(writer);
|
||||
}
|
||||
writer.close();
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
Document doc = new Document();
|
||||
doc.add(new Field("bin1", bin, Field.Store.YES));
|
||||
doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
|
||||
|
@ -402,7 +401,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
// force optimize
|
||||
|
||||
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer.optimize();
|
||||
writer.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
|
@ -431,7 +430,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 11 documents with term : aaa
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
for (int i = 0; i < 11; i++)
|
||||
{
|
||||
addDoc(writer, searchTerm.text());
|
||||
|
@ -476,7 +475,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 11 documents with term : aaa
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
for (int i = 0; i < 11; i++)
|
||||
{
|
||||
addDoc(writer, searchTerm.text());
|
||||
|
@ -525,7 +524,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 1 documents with term : aaa
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDoc(writer, searchTerm.text());
|
||||
writer.close();
|
||||
|
||||
|
@ -570,7 +569,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 1 documents with term : aaa
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer.setUseCompoundFile(false);
|
||||
addDoc(writer, searchTerm.text());
|
||||
writer.close();
|
||||
|
@ -624,7 +623,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
Term searchTerm2 = new Term("content", "bbb");
|
||||
|
||||
// add 100 documents with term : aaa
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
for (int i = 0; i < 100; i++)
|
||||
{
|
||||
addDoc(writer, searchTerm.text());
|
||||
|
@ -640,7 +639,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
assertTermDocsCount("first reader", reader, searchTerm2, 0);
|
||||
|
||||
// add 100 documents with term : bbb
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
for (int i = 0; i < 100; i++)
|
||||
{
|
||||
addDoc(writer, searchTerm2.text());
|
||||
|
@ -707,7 +706,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
// Create initial data set
|
||||
File dirFile = new File(System.getProperty("tempDir"), "testIndex");
|
||||
Directory dir = getDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDoc(writer, "test");
|
||||
writer.close();
|
||||
dir.close();
|
||||
|
@ -717,7 +716,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
dir = getDirectory();
|
||||
|
||||
// Now create the data set again, just as before
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDoc(writer, "test");
|
||||
writer.close();
|
||||
dir.close();
|
||||
|
@ -743,7 +742,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
else
|
||||
dir = getDirectory();
|
||||
assertFalse(IndexReader.indexExists(dir));
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
assertTrue(IndexWriter.isLocked(dir)); // writer open, so dir is locked
|
||||
writer.close();
|
||||
|
@ -760,7 +759,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
// incremented:
|
||||
Thread.sleep(1000);
|
||||
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
|
@ -777,7 +776,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
public void testVersion() throws IOException {
|
||||
Directory dir = new MockRAMDirectory();
|
||||
assertFalse(IndexReader.indexExists(dir));
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
assertTrue(IndexWriter.isLocked(dir)); // writer open, so dir is locked
|
||||
writer.close();
|
||||
|
@ -788,7 +787,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
reader.close();
|
||||
// modify index and check version has been
|
||||
// incremented:
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
reader = IndexReader.open(dir, false);
|
||||
|
@ -799,10 +798,10 @@ public class TestIndexReader extends LuceneTestCase
|
|||
|
||||
public void testLock() throws IOException {
|
||||
Directory dir = new MockRAMDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
try {
|
||||
reader.deleteDocument(0);
|
||||
|
@ -819,7 +818,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
|
||||
public void testUndeleteAll() throws IOException {
|
||||
Directory dir = new MockRAMDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
|
@ -836,7 +835,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
|
||||
public void testUndeleteAllAfterClose() throws IOException {
|
||||
Directory dir = new MockRAMDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
|
@ -853,7 +852,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
|
||||
public void testUndeleteAllAfterCloseThenReopen() throws IOException {
|
||||
Directory dir = new MockRAMDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
addDocumentWithFields(writer);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
|
@ -891,7 +890,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
|
||||
// First build up a starting index:
|
||||
RAMDirectory startDir = new MockRAMDirectory();
|
||||
IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
|
||||
for(int i=0;i<157;i++) {
|
||||
Document d = new Document();
|
||||
d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
|
||||
|
@@ -1081,7 +1080,7 @@ public class TestIndexReader extends LuceneTestCase
   public void testDocsOutOfOrderJIRA140() throws IOException {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     for(int i=0;i<11;i++) {
       addDoc(writer, "aaa");
     }
@@ -1099,7 +1098,7 @@ public class TestIndexReader extends LuceneTestCase
     }
     reader.close();

-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);

     // We must add more docs to get a new segment written
     for(int i=0;i<11;i++) {
@@ -1121,7 +1120,7 @@ public class TestIndexReader extends LuceneTestCase
   public void testExceptionReleaseWriteLockJIRA768() throws IOException {

     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     addDoc(writer, "aaa");
     writer.close();

@@ -1197,7 +1196,7 @@ public class TestIndexReader extends LuceneTestCase
     // add 100 documents with term : aaa
     // add 100 documents with term : bbb
     // add 100 documents with term : ccc
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     for (int i = 0; i < 100; i++)
     {
       addDoc(writer, searchTerm1.text());
@@ -1421,7 +1420,7 @@ public class TestIndexReader extends LuceneTestCase
     RAMDirectory d = new MockRAMDirectory();

     // set up writer
-    IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
     for(int i=0;i<27;i++)
       addDocumentWithFields(writer);
@@ -1437,7 +1436,7 @@ public class TestIndexReader extends LuceneTestCase
     assertTrue(c.equals(r.getIndexCommit()));

     // Change the index
-    writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
     for(int i=0;i<7;i++)
       addDocumentWithFields(writer);
@@ -1448,7 +1447,7 @@ public class TestIndexReader extends LuceneTestCase
     assertFalse(r2.getIndexCommit().isOptimized());
     r2.close();

-    writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     writer.optimize();
     writer.close();

@@ -1462,7 +1461,7 @@ public class TestIndexReader extends LuceneTestCase

   public void testReadOnly() throws Throwable {
     RAMDirectory d = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     addDocumentWithFields(writer);
     writer.commit();
     addDocumentWithFields(writer);
@@ -1476,7 +1475,7 @@ public class TestIndexReader extends LuceneTestCase
       // expected
     }

-    writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     addDocumentWithFields(writer);
     writer.close();

@@ -1493,7 +1492,7 @@ public class TestIndexReader extends LuceneTestCase
       // expected
     }

-    writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     writer.optimize();
     writer.close();

@@ -1511,7 +1510,7 @@ public class TestIndexReader extends LuceneTestCase
     }

     // Make sure write lock isn't held
-    writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     writer.close();

     r3.close();
@@ -1521,7 +1520,7 @@ public class TestIndexReader extends LuceneTestCase
   // LUCENE-1474
   public void testIndexReader() throws Exception {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT),
                                          IndexWriter.MaxFieldLength.UNLIMITED);
     writer.addDocument(createDocument("a"));
     writer.addDocument(createDocument("b"));
@@ -1539,7 +1538,7 @@ public class TestIndexReader extends LuceneTestCase
   public void testIndexReaderUnDeleteAll() throws Exception {
     MockRAMDirectory dir = new MockRAMDirectory();
     dir.setPreventDoubleWrite(false);
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT),
                                          IndexWriter.MaxFieldLength.UNLIMITED);
     writer.addDocument(createDocument("a"));
     writer.addDocument(createDocument("b"));
@@ -1581,7 +1580,7 @@ public class TestIndexReader extends LuceneTestCase

     Directory dir = new MockRAMDirectory();

-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT),
                                          IndexWriter.MaxFieldLength.LIMITED);

     writer.setMaxBufferedDocs(2);
@@ -1607,7 +1606,7 @@ public class TestIndexReader extends LuceneTestCase
   // reuse the doc values arrays in FieldCache
   public void testFieldCacheReuseAfterClone() throws Exception {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
     writer.addDocument(doc);
@@ -1638,7 +1637,7 @@ public class TestIndexReader extends LuceneTestCase
   // FieldCache
   public void testFieldCacheReuseAfterReopen() throws Exception {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
     writer.addDocument(doc);
@@ -1670,7 +1669,7 @@ public class TestIndexReader extends LuceneTestCase
   // reopen switches readOnly
   public void testReopenChangeReadonly() throws Exception {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
     writer.addDocument(doc);
@@ -1711,7 +1710,7 @@ public class TestIndexReader extends LuceneTestCase
   // LUCENE-1586: getUniqueTermCount
   public void testUniqueTermCount() throws Exception {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
     doc.add(new Field("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
@@ -1744,7 +1743,7 @@ public class TestIndexReader extends LuceneTestCase
   // LUCENE-1609: don't load terms index
   public void testNoTermsIndex() throws Throwable {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED));
     doc.add(new Field("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED));
@@ -1762,7 +1761,7 @@ public class TestIndexReader extends LuceneTestCase
     assertFalse(((SegmentReader) r.getSequentialSubReaders()[0]).termsIndexLoaded());

     assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor());
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     writer.addDocument(doc);
     writer.close();

@@ -1781,7 +1780,7 @@ public class TestIndexReader extends LuceneTestCase
   // LUCENE-2046
   public void testPrepareCommitIsCurrent() throws Throwable {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     writer.addDocument(doc);
     IndexReader r = IndexReader.open(dir, true);
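Note: every hunk in this commit swaps the per-call Version.LUCENE_CURRENT argument for a TEST_VERSION_CURRENT constant inherited from the test base class. As a rough sketch (the exact field definition is not part of this diff, so the modifiers and the concrete version value shown here are assumptions), the shared constant would look something like:

    public abstract class LuceneTestCase extends TestCase {
      /** Version all core tests compile and run against; bumped once per
          release instead of floating with Version.LUCENE_CURRENT.
          The concrete constant below is an assumed value. */
      protected static final Version TEST_VERSION_CURRENT = Version.LUCENE_31;
      ...
    }

Because the tests in this commit all extend LuceneTestCase (or its JUnit4 counterpart), the constant is visible without referencing org.apache.lucene.util.Version directly, which is why several files below simply drop that import.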
@@ -26,7 +26,6 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;

 /**
  * Tests cloning multiple types of readers, modifying the deletedDocs and norms
@@ -198,7 +197,7 @@ public class TestIndexReaderClone extends LuceneTestCase {

     TestIndexReaderReopen.createIndex(dir1, true);
     IndexReader reader1 = IndexReader.open(dir1, false);
-    IndexWriter w = new IndexWriter(dir1, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w = new IndexWriter(dir1, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     w.optimize();
     w.close();
     IndexReader reader2 = reader1.clone(true);
@@ -485,7 +484,7 @@ public class TestIndexReaderClone extends LuceneTestCase {

   public void testCloseStoredFields() throws Exception {
     final Directory dir = new MockRAMDirectory();
-    IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     w.setUseCompoundFile(false);
     Document doc = new Document();
     doc.add(new Field("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED));
@@ -72,7 +72,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
   protected void setUp() throws Exception {
     super.setUp();
     similarityOne = new SimilarityOne();
-    anlzr = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    anlzr = new StandardAnalyzer(TEST_VERSION_CURRENT);
   }

   /**
@@ -47,7 +47,6 @@ import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.BitVector;
-import org.apache.lucene.util.Version;

 public class TestIndexReaderReopen extends LuceneTestCase {

@@ -703,7 +702,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
     final Directory dir = new MockRAMDirectory();
     final int n = 30;

-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     for (int i = 0; i < n; i++) {
       writer.addDocument(createDocument(i, 3));
     }
@@ -722,7 +721,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
         modifier.deleteDocument(i % modifier.maxDoc());
         modifier.close();
       } else {
-        IndexWriter modifier = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+        IndexWriter modifier = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
         modifier.addDocument(createDocument(n + i, 6));
         modifier.close();
       }
@@ -947,7 +946,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {

   public static void createIndex(Directory dir, boolean multiSegment) throws IOException {
     IndexWriter.unlock(dir);
-    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);

     w.setMergePolicy(new LogDocMergePolicy(w));

@@ -992,7 +991,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
   static void modifyIndex(int i, Directory dir) throws IOException {
     switch (i) {
       case 0: {
-        IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+        IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
         w.deleteDocuments(new Term("field2", "a11"));
         w.deleteDocuments(new Term("field2", "b30"));
         w.close();
@@ -1007,13 +1006,13 @@ public class TestIndexReaderReopen extends LuceneTestCase {
         break;
       }
       case 2: {
-        IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+        IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
         w.optimize();
         w.close();
         break;
       }
       case 3: {
-        IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+        IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
         w.addDocument(createDocument(101, 4));
         w.optimize();
         w.addDocument(createDocument(102, 4));
@@ -1029,7 +1028,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
         break;
       }
       case 5: {
-        IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+        IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
         w.addDocument(createDocument(101, 4));
         w.close();
         break;
@@ -1193,7 +1192,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {

   public void testReopenOnCommit() throws Throwable {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), new KeepAllCommits(), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), new KeepAllCommits(), IndexWriter.MaxFieldLength.UNLIMITED);
     for(int i=0;i<4;i++) {
       Document doc = new Document();
       doc.add(new Field("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));

[File diff suppressed because it is too large]
@@ -29,7 +29,6 @@ import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;

 public class TestIndexWriterDelete extends LuceneTestCase {

@@ -43,7 +42,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {

     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setUseCompoundFile(true);
     modifier.setMaxBufferedDeleteTerms(1);

@@ -80,7 +79,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {

     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);

@@ -115,7 +114,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
   public void testMaxBufferedDeletes() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDeleteTerms(1);
     writer.deleteDocuments(new Term("foobar", "1"));
     writer.deleteDocuments(new Term("foobar", "1"));
@@ -130,7 +129,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     for(int t=0;t<2;t++) {
       Directory dir = new MockRAMDirectory();
       IndexWriter modifier = new IndexWriter(dir,
-          new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+          new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
       modifier.setMaxBufferedDocs(4);
       modifier.setMaxBufferedDeleteTerms(4);

@@ -172,7 +171,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
   public void testBothDeletes() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setMaxBufferedDocs(100);
     modifier.setMaxBufferedDeleteTerms(100);

@@ -205,7 +204,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
   public void testBatchDeletes() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);

@@ -249,7 +248,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
   public void testDeleteAll() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);

@@ -296,7 +295,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
   public void testDeleteAllRollback() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);

@@ -334,7 +333,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
   public void testDeleteAllNRT() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setMaxBufferedDocs(2);
     modifier.setMaxBufferedDeleteTerms(2);

@@ -426,7 +425,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     // First build up a starting index:
     MockRAMDirectory startDir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(startDir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     for (int i = 0; i < 157; i++) {
       Document d = new Document();
       d.add(new Field("id", Integer.toString(i), Field.Store.YES,
@@ -449,7 +448,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
       MockRAMDirectory dir = new MockRAMDirectory(startDir);
       dir.setPreventDoubleWrite(false);
       IndexWriter modifier = new IndexWriter(dir,
-          new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+          new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);

       modifier.setMaxBufferedDocs(1000); // use flush or close
       modifier.setMaxBufferedDeleteTerms(1000); // use flush or close
@@ -655,7 +654,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {

     MockRAMDirectory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     modifier.setUseCompoundFile(true);
     modifier.setMaxBufferedDeleteTerms(2);

@@ -764,7 +763,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {

     MockRAMDirectory dir = new MockRAMDirectory();
     IndexWriter modifier = new IndexWriter(dir,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);

     dir.failOn(failure.reset());

@@ -21,7 +21,6 @@ import java.util.Random;
 import java.io.IOException;

 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.Directory;
@@ -135,7 +134,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
   public void testRandomExceptions() throws Throwable {
     MockRAMDirectory dir = new MockRAMDirectory();

-    MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
     //writer.setMaxBufferedDocs(10);
     writer.setRAMBufferSizeMB(0.1);
@@ -173,7 +172,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
   public void testRandomExceptionsThreads() throws Throwable {

     MockRAMDirectory dir = new MockRAMDirectory();
-    MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    MockIndexWriter writer = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
     //writer.setMaxBufferedDocs(10);
     writer.setRAMBufferSizeMB(0.2);
@@ -75,10 +75,10 @@ public class TestIndexWriterLockRelease extends LuceneTestCase {
     IndexWriter im;
     FSDirectory dir = FSDirectory.open(this.__test_dir);
     try {
-      im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+      im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     } catch (FileNotFoundException e) {
       try {
-        im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+        im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
       } catch (FileNotFoundException e1) {
       }
     } finally {
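The TestIndexWriterLockRelease hunk above is the most verbose case this commit cleans up: the analyzer was constructed with a fully qualified org.apache.lucene.util.Version.LUCENE_CURRENT. After the change the call site reduces to the inherited constant (illustrative excerpt of the same lines, not a new API):

    // before
    im = new IndexWriter(dir,
        new org.apache.lucene.analysis.standard.StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
        false, IndexWriter.MaxFieldLength.LIMITED);

    // after
    im = new IndexWriter(dir,
        new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT),
        false, IndexWriter.MaxFieldLength.LIMITED);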
@@ -24,7 +24,6 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;

 import org.apache.lucene.util.LuceneTestCase;
@@ -35,7 +34,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testNormalCase() throws IOException {
     Directory dir = new RAMDirectory();

-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(10);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -52,7 +51,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testNoOverMerge() throws IOException {
     Directory dir = new RAMDirectory();

-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(10);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -74,7 +73,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testForceFlush() throws IOException {
     Directory dir = new RAMDirectory();

-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(10);
     LogDocMergePolicy mp = new LogDocMergePolicy(writer);
@@ -85,7 +84,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
       addDoc(writer);
       writer.close();

-      writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setMergePolicy(mp);
       mp.setMinMergeDocs(100);
@@ -100,7 +99,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testMergeFactorChange() throws IOException {
     Directory dir = new RAMDirectory();

-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(100);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -126,7 +125,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testMaxBufferedDocsChange() throws IOException {
     Directory dir = new RAMDirectory();

-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(101);
     writer.setMergeFactor(101);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -140,7 +139,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
       }
       writer.close();

-      writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(101);
       writer.setMergeFactor(101);
       writer.setMergePolicy(new LogDocMergePolicy(writer));
@@ -171,7 +170,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testMergeDocCount0() throws IOException {
     Directory dir = new RAMDirectory();

-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(100);
@@ -186,7 +185,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     reader.deleteDocuments(new Term("content", "aaa"));
     reader.close();

-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(5);
@@ -56,7 +56,7 @@ public class TestIndexWriterMerging extends LuceneTestCase

     Directory merged = new MockRAMDirectory();

-    IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMergeFactor(2);

     writer.addIndexesNoOptimize(new Directory[]{indexA, indexB});
@@ -93,7 +93,7 @@ public class TestIndexWriterMerging extends LuceneTestCase
   private void fillIndex(Directory dir, int start, int numDocs) throws IOException
   {

-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMergeFactor(2);
     writer.setMaxBufferedDocs(2);

@@ -37,7 +37,6 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.util.ThreadInterruptedException;

@@ -77,7 +76,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     boolean optimize = true;

     Directory dir1 = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);

     // create the index
@@ -112,7 +111,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     assertEquals(0, count(new Term("id", id10), r3));
     assertEquals(1, count(new Term("id", Integer.toString(8000)), r3));

-    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
@@ -140,7 +139,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     boolean optimize = false;

     Directory dir1 = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setInfoStream(infoStream);
     // create the index
@@ -149,7 +148,7 @@ public class TestIndexWriterReader extends LuceneTestCase {

     // create a 2nd index
     Directory dir2 = new MockRAMDirectory();
-    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer2.setInfoStream(infoStream);
     createIndexNoClose(!optimize, "index2", writer2);
@@ -187,13 +186,13 @@ public class TestIndexWriterReader extends LuceneTestCase {
     boolean optimize = false;

     Directory dir1 = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setInfoStream(infoStream);

     // create a 2nd index
     Directory dir2 = new MockRAMDirectory();
-    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer2.setInfoStream(infoStream);
     createIndexNoClose(!optimize, "index2", writer2);
@@ -222,7 +221,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     boolean optimize = true;

     Directory dir1 = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setInfoStream(infoStream);
     // create the index
@@ -261,7 +260,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     writer.close();

     // reopen the writer to verify the delete made it to the directory
-    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setInfoStream(infoStream);
     IndexReader w2r1 = writer.getReader();
@@ -276,7 +275,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     int numDirs = 3;

     Directory mainDir = new MockRAMDirectory();
-    IndexWriter mainWriter = new IndexWriter(mainDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter mainWriter = new IndexWriter(mainDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     mainWriter.setInfoStream(infoStream);
     AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numIter, mainWriter);
@@ -384,7 +383,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
       this.numDirs = numDirs;
       this.mainWriter = mainWriter;
       addDir = new MockRAMDirectory();
-      IndexWriter writer = new IndexWriter(addDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+      IndexWriter writer = new IndexWriter(addDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
          IndexWriter.MaxFieldLength.LIMITED);
      writer.setMaxBufferedDocs(2);
      for (int i = 0; i < NUM_INIT_DOCS; i++) {
@@ -492,7 +491,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
    */
   public void doTestIndexWriterReopenSegment(boolean optimize) throws Exception {
     Directory dir1 = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setInfoStream(infoStream);
     IndexReader r1 = writer.getReader();
@@ -530,7 +529,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
     writer.close();

     // test whether the changes made it to the directory
-    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     IndexReader w2r1 = writer.getReader();
     // insure the deletes were actually flushed to the directory
@@ -571,7 +570,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
    */
   public static void createIndex(Directory dir1, String indexName,
       boolean multiSegment) throws IOException {
-    IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     w.setMergePolicy(new LogDocMergePolicy(w));
     for (int i = 0; i < 100; i++) {
@@ -606,7 +605,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
   public void testMergeWarmer() throws Exception {

     Directory dir1 = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setInfoStream(infoStream);

@@ -641,7 +640,7 @@ public class TestIndexWriterReader extends LuceneTestCase {

   public void testAfterCommit() throws Exception {
     Directory dir1 = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setInfoStream(infoStream);

@@ -674,7 +673,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
   // Make sure reader remains usable even if IndexWriter closes
   public void testAfterClose() throws Exception {
     Directory dir1 = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setInfoStream(infoStream);

@@ -704,7 +703,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
   // Stress test reopen during addIndexes
   public void testDuringAddIndexes() throws Exception {
     Directory dir1 = new MockRAMDirectory();
-    final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setInfoStream(infoStream);
     writer.setMergeFactor(2);
@@ -782,7 +781,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
   // Stress test reopen during add/delete
   public void testDuringAddDelete() throws Exception {
     Directory dir1 = new MockRAMDirectory();
-    final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    final IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setInfoStream(infoStream);
     writer.setMergeFactor(2);
@@ -863,7 +862,7 @@ public class TestIndexWriterReader extends LuceneTestCase {

   public void testExpungeDeletes() throws Throwable {
     Directory dir = new MockRAMDirectory();
-    final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
@@ -888,7 +887,7 @@ public class TestIndexWriterReader extends LuceneTestCase {

   public void testDeletesNumDocs() throws Throwable {
     Directory dir = new MockRAMDirectory();
-    final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     doc.add(new Field("field", "a b c", Field.Store.NO, Field.Index.ANALYZED));
@@ -18,7 +18,6 @@ package org.apache.lucene.index;
  */

 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.*;
@@ -64,7 +63,7 @@ public class TestLazyBug extends LuceneTestCase {
     Directory dir = new RAMDirectory();
     try {
       Random r = newRandom();
-      Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+      Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
       IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);

       writer.setUseCompoundFile(false);
@@ -30,7 +30,6 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;

 /**
  * Tests lazy skipping on the proximity file.
@@ -61,7 +60,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
     int numDocs = 500;

     Directory directory = new SeekCountingDirectory();
-    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setUseCompoundFile(false);
     writer.setMaxBufferedDocs(10);
     for (int i = 0; i < numDocs; i++) {
@@ -119,7 +118,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {

   public void testSeek() throws IOException {
     Directory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     for (int i = 0; i < 10; i++) {
       Document doc = new Document();
       doc.add(new Field(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED));
@@ -32,7 +32,6 @@ import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;

 /**
  * This testcase tests whether multi-level skipping is being used
@@ -92,7 +91,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
   private static class PayloadAnalyzer extends Analyzer {
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
-      return new PayloadFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader));
+      return new PayloadFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
     }

   }
@@ -25,7 +25,6 @@ import org.apache.lucene.index.TestIndexWriterReader.HeavyAtomicInt;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;

 public class TestNRTReaderWithThreads extends LuceneTestCase {
   Random random = new Random();
@@ -33,7 +32,7 @@ public class TestNRTReaderWithThreads extends LuceneTestCase {

   public void testIndexing() throws Exception {
     Directory mainDir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(mainDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(mainDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setUseCompoundFile(false);
     IndexReader reader = writer.getReader(); // start pooling readers
@@ -65,7 +65,7 @@ public class TestNorms extends LuceneTestCase {
   protected void setUp() throws Exception {
     super.setUp();
     similarityOne = new SimilarityOne();
-    anlzr = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    anlzr = new StandardAnalyzer(TEST_VERSION_CURRENT);
   }

   /**
@@ -66,7 +66,7 @@ public class TestOmitTf extends LuceneTestCase {
   // omitTermFreqAndPositions bit in the FieldInfo
   public void testOmitTermFreqAndPositions() throws Exception {
     Directory ram = new MockRAMDirectory();
-    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
     IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     Document d = new Document();

@@ -112,7 +112,7 @@ public class TestOmitTf extends LuceneTestCase {
   // omitTermFreqAndPositions for the same field works
   public void testMixedMerge() throws Exception {
     Directory ram = new MockRAMDirectory();
-    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
     IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(3);
     writer.setMergeFactor(2);
@@ -165,7 +165,7 @@ public class TestOmitTf extends LuceneTestCase {
   // field,
   public void testMixedRAM() throws Exception {
     Directory ram = new MockRAMDirectory();
-    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
     IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(2);
@@ -213,7 +213,7 @@ public class TestOmitTf extends LuceneTestCase {
   // Verifies no *.prx exists when all fields omit term freq:
   public void testNoPrxFile() throws Throwable {
     Directory ram = new MockRAMDirectory();
-    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
     IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(3);
     writer.setMergeFactor(2);
@@ -244,7 +244,7 @@ public class TestOmitTf extends LuceneTestCase {
   // Test scores with one field with Term Freqs and one without, otherwise with equal content
   public void testBasic() throws Exception {
     Directory dir = new MockRAMDirectory();
-    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
     IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMergeFactor(2);
     writer.setMaxBufferedDocs(2);
@@ -106,7 +106,7 @@ public class TestParallelReader extends LuceneTestCase {

     // one document only:
     Directory dir2 = new MockRAMDirectory();
-    IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d3 = new Document();
     d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
     w2.addDocument(d3);
@@ -151,13 +151,13 @@ public class TestParallelReader extends LuceneTestCase {
     Directory dir2 = getDir2();

     // add another document to ensure that the indexes are not optimized
-    IndexWriter modifier = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter modifier = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     Document d = new Document();
     d.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
     modifier.addDocument(d);
     modifier.close();

-    modifier = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+    modifier = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     d = new Document();
     d.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
     modifier.addDocument(d);
@@ -170,7 +170,7 @@ public class TestParallelReader extends LuceneTestCase {
     assertFalse(pr.isOptimized());
     pr.close();

-    modifier = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+    modifier = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     modifier.optimize();
     modifier.close();

@@ -182,7 +182,7 @@ public class TestParallelReader extends LuceneTestCase {
     pr.close();


-    modifier = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+    modifier = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     modifier.optimize();
     modifier.close();

@@ -233,7 +233,7 @@ public class TestParallelReader extends LuceneTestCase {
   // Fields 1-4 indexed together:
   private Searcher single() throws IOException {
     Directory dir = new MockRAMDirectory();
-    IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d1 = new Document();
     d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
     d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
@@ -263,7 +263,7 @@ public class TestParallelReader extends LuceneTestCase {

   private Directory getDir1() throws IOException {
     Directory dir1 = new MockRAMDirectory();
-    IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d1 = new Document();
     d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
     d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
@@ -278,7 +278,7 @@ public class TestParallelReader extends LuceneTestCase {

   private Directory getDir2() throws IOException {
     Directory dir2 = new RAMDirectory();
-    IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d3 = new Document();
     d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
     d3.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
@@ -20,7 +20,6 @@ package org.apache.lucene.index;
 import java.io.IOException;

 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;

 import org.apache.lucene.analysis.SimpleAnalyzer;
@@ -48,7 +47,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
    */
   public void testEmptyIndex() throws IOException {
     RAMDirectory rd1 = new MockRAMDirectory();
-    IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+    IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
         MaxFieldLength.UNLIMITED);
     iw.close();

@@ -56,7 +55,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {

     RAMDirectory rdOut = new MockRAMDirectory();

-    IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+    IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
         MaxFieldLength.UNLIMITED);
     ParallelReader pr = new ParallelReader();
     pr.add(IndexReader.open(rd1,true));
@@ -81,7 +80,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
   public void testEmptyIndexWithVectors() throws IOException {
     RAMDirectory rd1 = new MockRAMDirectory();
     {
-      IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+      IndexWriter iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
           MaxFieldLength.UNLIMITED);
       Document doc = new Document();
       doc.add(new Field("test", "", Store.NO, Index.ANALYZED,
@@ -96,7 +95,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {
       ir.deleteDocument(0);
       ir.close();

-      iw = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), false,
+      iw = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), false,
          MaxFieldLength.UNLIMITED);
       iw.optimize();
       iw.close();
@@ -104,7 +103,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {

     RAMDirectory rd2 = new MockRAMDirectory();
     {
-      IndexWriter iw = new IndexWriter(rd2, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+      IndexWriter iw = new IndexWriter(rd2, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
          MaxFieldLength.UNLIMITED);
       Document doc = new Document();
       iw.addDocument(doc);
@@ -113,7 +112,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase {

     RAMDirectory rdOut = new MockRAMDirectory();

-    IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+    IndexWriter iwOut = new IndexWriter(rdOut, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
        MaxFieldLength.UNLIMITED);
     ParallelReader pr = new ParallelReader();
     pr.add(IndexReader.open(rd1,true));
@@ -20,8 +20,6 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -39,7 +37,7 @@ public class TestParallelTermEnum extends LuceneTestCase {
     Document doc;
 
     RAMDirectory rd1 = new RAMDirectory();
-    IndexWriter iw1 = new IndexWriter(rd1, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter iw1 = new IndexWriter(rd1, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     doc = new Document();
     doc.add(new Field("field1", "the quick brown fox jumps", Store.YES,
@@ -51,7 +49,7 @@ public class TestParallelTermEnum extends LuceneTestCase {
 
     iw1.close();
     RAMDirectory rd2 = new RAMDirectory();
-    IndexWriter iw2 = new IndexWriter(rd2, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter iw2 = new IndexWriter(rd2, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     doc = new Document();
     doc.add(new Field("field0", "", Store.NO, Index.ANALYZED));
@@ -41,7 +41,6 @@ import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.UnicodeUtil;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
 
 
@@ -396,7 +395,7 @@ public class TestPayloads extends LuceneTestCase {
         @Override
         public TokenStream tokenStream(String fieldName, Reader reader) {
             PayloadData payload = fieldToData.get(fieldName);
-            TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+            TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
             if (payload != null) {
                 if (payload.numFieldInstancesToSkip == 0) {
                     ts = new PayloadFilter(ts, payload.data, payload.offset, payload.length);
@@ -469,7 +468,7 @@ public class TestPayloads extends LuceneTestCase {
         final ByteArrayPool pool = new ByteArrayPool(numThreads, 5);
 
         Directory dir = new RAMDirectory();
-        final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+        final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
         final String field = "test";
 
         Thread[] ingesters = new Thread[numThreads];
@@ -18,7 +18,6 @@ package org.apache.lucene.index;
  */
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.Directory;
@@ -103,7 +102,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
 
   public void testSkipTo(int indexDivisor) throws IOException {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
                                          IndexWriter.MaxFieldLength.LIMITED);
 
     Term ta = new Term("content","aaa");
@@ -20,8 +20,6 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -38,7 +36,7 @@ public class TestSegmentTermEnum extends LuceneTestCase
   {
     IndexWriter writer = null;
 
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     // ADD 100 documents with term : aaa
     // add 100 documents with terms: aaa bbb
@@ -54,7 +52,7 @@ public class TestSegmentTermEnum extends LuceneTestCase
     verifyDocFreq();
 
     // merge segments by optimizing the index
-    writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     writer.optimize();
     writer.close();
 
@@ -65,7 +63,7 @@ public class TestSegmentTermEnum extends LuceneTestCase
   public void testPrevTermAtEnd() throws IOException
   {
     Directory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     addDoc(writer, "aaa bbb");
     writer.close();
     SegmentReader reader = SegmentReader.getOnlySegmentReader(dir);
@@ -26,7 +26,7 @@ import java.util.Random;
 import java.io.File;
 
 public class TestStressIndexing extends LuceneTestCase {
-  private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+  private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
   private Random RANDOM;
 
   private static abstract class TimedThread extends Thread {
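Note that ANALYZER in the hunk above is a static field, so its initializer runs at class-load time, before any test instance exists. That only compiles because TEST_VERSION_CURRENT is itself static; an instance-level getter could not be referenced there. A minimal sketch of the same shape, using the assumed base class from the earlier sketch:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.SimpleAnalyzer;

    // Hedged sketch: static initializers may only touch static members,
    // which is exactly what a static version constant allows.
    class StressIndexingSketch extends LuceneTestCaseSketch {
      private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
    }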
@@ -19,7 +19,6 @@ import org.apache.lucene.document.*;
 import org.apache.lucene.analysis.*;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.search.TermQuery;
@@ -124,7 +123,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
 
   public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
     Map<String,Document> docs = new HashMap<String,Document>();
-    IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     w.setUseCompoundFile(false);
 
     /***
@@ -176,7 +175,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
   public Map<String,Document> indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
     Map<String,Document> docs = new HashMap<String,Document>();
     for(int iter=0;iter<3;iter++) {
-      IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+      IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
       w.setUseCompoundFile(false);
 
       // force many merges
@@ -219,7 +218,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
 
 
   public static void indexSerial(Map<String,Document> docs, Directory dir) throws IOException {
-    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
 
     // index all docs in a single thread
     Iterator<Document> iter = docs.values().iterator();
@@ -24,7 +24,6 @@ import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
 import org.apache.lucene.util.English;
 
@@ -35,7 +34,7 @@ import java.io.File;
 
 public class TestThreadedOptimize extends LuceneTestCase {
 
-  private static final Analyzer ANALYZER = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+  private static final Analyzer ANALYZER = new SimpleAnalyzer(TEST_VERSION_CURRENT);
 
   private final static int NUM_THREADS = 3;
   //private final static int NUM_THREADS = 5;
@@ -27,8 +27,6 @@ import java.util.Map;
 import java.util.HashMap;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -67,7 +65,7 @@ public class TestTransactionRollback extends LuceneTestCase {
     if (last==null)
       throw new RuntimeException("Couldn't find commit point "+id);
 
-    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
                                     new RollbackDeletionPolicy(id), MaxFieldLength.UNLIMITED, last);
     Map<String,String> data = new HashMap<String,String>();
     data.put("index", "Rolled back to 1-"+id);
@@ -129,7 +127,7 @@ public class TestTransactionRollback extends LuceneTestCase {
 
     //Build index, of records 1 to 100, committing after each batch of 10
     IndexDeletionPolicy sdp=new KeepAllDeletionPolicy();
-    IndexWriter w=new IndexWriter(dir,new WhitespaceAnalyzer(Version.LUCENE_CURRENT),sdp,MaxFieldLength.UNLIMITED);
+    IndexWriter w=new IndexWriter(dir,new WhitespaceAnalyzer(TEST_VERSION_CURRENT),sdp,MaxFieldLength.UNLIMITED);
     for(int currentRecordId=1;currentRecordId<=100;currentRecordId++) {
       Document doc=new Document();
       doc.add(new Field(FIELD_RECORD_ID,""+currentRecordId,Field.Store.YES,Field.Index.ANALYZED));
@@ -197,7 +195,7 @@ public class TestTransactionRollback extends LuceneTestCase {
     for(int i=0;i<2;i++) {
       // Unless you specify a prior commit point, rollback
       // should not work:
-      new IndexWriter(dir,new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+      new IndexWriter(dir,new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
                       new DeleteLastCommitPolicy(),
                       MaxFieldLength.UNLIMITED).close();
       IndexReader r = IndexReader.open(dir, true);
@@ -88,12 +88,12 @@ public class TestTransactions extends LuceneTestCase
     @Override
     public void doWork() throws Throwable {
 
-      IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
       writer1.setMaxBufferedDocs(3);
       writer1.setMergeFactor(2);
       ((ConcurrentMergeScheduler) writer1.getMergeScheduler()).setSuppressExceptions();
 
-      IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
       // Intentionally use different params so flush/merge
       // happen @ different times
       writer2.setMaxBufferedDocs(2);
@@ -178,7 +178,7 @@ public class TestTransactions extends LuceneTestCase
   }
 
   public void initIndex(Directory dir) throws Throwable {
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     for(int j=0; j<7; j++) {
       Document d = new Document();
       int n = RANDOM.nextInt();
@@ -44,7 +44,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
 
   public void testMultiAnalyzer() throws ParseException {
 
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "", new MultiAnalyzer());
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "", new MultiAnalyzer());
 
     // trivial, no multiple tokens:
     assertEquals("foo", qp.parse("foo").toString());
@@ -135,9 +135,9 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
 
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
-      TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+      TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
       result = new TestFilter(result);
-      result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+      result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
       return result;
     }
   }
@@ -203,9 +203,9 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
 
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
-      TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+      TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
       result = new TestPosIncrementFilter(result);
-      result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+      result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
       return result;
     }
   }
@@ -242,7 +242,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
   private final static class DumbQueryParser extends QueryParser {
 
     public DumbQueryParser(String f, Analyzer a) {
-      super(Version.LUCENE_CURRENT, f, a);
+      super(TEST_VERSION_CURRENT, f, a);
     }
 
     /** expose super's version */
@@ -36,7 +36,6 @@ import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 
 /**
  * Tests QueryParser.
@@ -60,18 +59,18 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
     String[] fields = {"b", "t"};
     Occur occur[] = {Occur.SHOULD, Occur.SHOULD};
     TestQueryParser.QPTestAnalyzer a = new TestQueryParser.QPTestAnalyzer();
-    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, a);
+    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, a);
 
     Query q = mfqp.parse(qtxt);
     assertEquals(expectedRes, q.toString());
 
-    q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, qtxt, fields, occur, a);
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, qtxt, fields, occur, a);
     assertEquals(expectedRes, q.toString());
   }
 
   public void testSimple() throws Exception {
     String[] fields = {"b", "t"};
-    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
 
     Query q = mfqp.parse("one");
     assertEquals("b:one t:one", q.toString());
@@ -134,7 +133,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
     boosts.put("b", Float.valueOf(5));
     boosts.put("t", Float.valueOf(10));
     String[] fields = {"b", "t"};
-    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
+    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new StandardAnalyzer(TEST_VERSION_CURRENT), boosts);
 
 
     //Check for simple
@@ -160,24 +159,24 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
   public void testStaticMethod1() throws ParseException {
     String[] fields = {"b", "t"};
     String[] queries = {"one", "two"};
-    Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
     assertEquals("b:one t:two", q.toString());
 
     String[] queries2 = {"+one", "+two"};
-    q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries2, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
     assertEquals("(+b:one) (+t:two)", q.toString());
 
     String[] queries3 = {"one", "+two"};
-    q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries3, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
     assertEquals("b:one (+t:two)", q.toString());
 
     String[] queries4 = {"one +more", "+two"};
-    q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries4, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
     assertEquals("(b:one +b:more) (+t:two)", q.toString());
 
     String[] queries5 = {"blah"};
     try {
-      q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+      q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries5, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
       fail();
     } catch(IllegalArgumentException e) {
       // expected exception, array length differs
@@ -187,11 +186,11 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
     TestQueryParser.QPTestAnalyzer stopA = new TestQueryParser.QPTestAnalyzer();
 
     String[] queries6 = {"((+stop))", "+((stop))"};
-    q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries6, fields, stopA);
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries6, fields, stopA);
     assertEquals("", q.toString());
 
     String[] queries7 = {"one ((+stop)) +more", "+((stop)) +two"};
-    q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries7, fields, stopA);
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries7, fields, stopA);
     assertEquals("(b:one +b:more) (+t:two)", q.toString());
 
   }
@@ -199,15 +198,15 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
   public void testStaticMethod2() throws ParseException {
     String[] fields = {"b", "t"};
     BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
-    Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
     assertEquals("+b:one -t:one", q.toString());
 
-    q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
-      q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+      q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
       fail();
     } catch(IllegalArgumentException e) {
       // expected exception, array length differs
@@ -219,15 +218,15 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
     //int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD};
     BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
 
-    Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));//, fields, flags, new StandardAnalyzer());
+    Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));//, fields, flags, new StandardAnalyzer());
     assertEquals("+b:one -t:one", q.toString());
 
-    q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
-      q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+      q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
       fail();
     } catch(IllegalArgumentException e) {
       // expected exception, array length differs
@@ -239,12 +238,12 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
     String[] fields = {"f1", "f2", "f3"};
     BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD};
-    Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
     assertEquals("+f1:one -f2:two f3:three", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
-      q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+      q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
       fail();
     } catch(IllegalArgumentException e) {
       // expected exception, array length differs
@@ -255,12 +254,12 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
     String[] queries = {"one", "two"};
     String[] fields = {"b", "t"};
     BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
-    Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
     assertEquals("+b:one -t:two", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
-      q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+      q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
       fail();
     } catch(IllegalArgumentException e) {
       // expected exception, array length differs
@@ -269,7 +268,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
 
   public void testAnalyzerReturningNull() throws ParseException {
     String[] fields = new String[] { "f1", "f2", "f3" };
-    MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new AnalyzerReturningNull());
+    MultiFieldQueryParser parser = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new AnalyzerReturningNull());
     Query q = parser.parse("bla AND blo");
     assertEquals("+(f2:bla f3:bla) +(f2:blo f3:blo)", q.toString());
     // the following queries are not affected as their terms are not analyzed anyway:
@@ -282,7 +281,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
   }
 
   public void testStopWordSearching() throws Exception {
-    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
     Directory ramDir = new RAMDirectory();
     IndexWriter iw = new IndexWriter(ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
@@ -291,7 +290,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
     iw.close();
 
     MultiFieldQueryParser mfqp =
-      new MultiFieldQueryParser(Version.LUCENE_CURRENT, new String[] {"body"}, analyzer);
+      new MultiFieldQueryParser(TEST_VERSION_CURRENT, new String[] {"body"}, analyzer);
     mfqp.setDefaultOperator(QueryParser.Operator.AND);
     Query q = mfqp.parse("the footest");
     IndexSearcher is = new IndexSearcher(ramDir, true);
@@ -304,7 +303,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase {
    * Return empty tokens for field "f1".
    */
   private static class AnalyzerReturningNull extends Analyzer {
-    StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    StandardAnalyzer stdAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
 
     public AnalyzerReturningNull() {
     }
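The payoff of the consolidation is most visible in the hunks above, where many call sites had even spelled out the fully qualified org.apache.lucene.util.Version.LUCENE_CURRENT: with every test on one constant, a release bump becomes a single edit instead of a tree-wide search-and-replace. Hedged, illustrative only, against the earlier sketch class:

    import org.apache.lucene.util.Version;

    // Illustrative one-line version bump; both Version values are assumptions.
    abstract class LuceneTestCaseSketchV2 extends junit.framework.TestCase {
      public static final Version TEST_VERSION_CURRENT = Version.LUCENE_31; // was LUCENE_30 in the earlier sketch
    }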
@@ -64,7 +64,6 @@ import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.util.LocalizedTestCase;
-import org.apache.lucene.util.Version;
 
 /**
  * Tests QueryParser.
@@ -128,13 +127,13 @@ public class TestQueryParser extends LocalizedTestCase {
     /** Filters LowerCaseTokenizer with StopFilter. */
     @Override
     public final TokenStream tokenStream(String fieldName, Reader reader) {
-      return new QPTestFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader));
+      return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
     }
   }
 
   public static class QPTestParser extends QueryParser {
     public QPTestParser(String f, Analyzer a) {
-      super(Version.LUCENE_CURRENT, f, a);
+      super(TEST_VERSION_CURRENT, f, a);
     }
 
     @Override
@@ -158,8 +157,8 @@ public class TestQueryParser extends LocalizedTestCase {
 
   public QueryParser getParser(Analyzer a) throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", a);
+      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
     qp.setDefaultOperator(QueryParser.OR_OPERATOR);
     return qp;
   }
@@ -228,8 +227,8 @@ public class TestQueryParser extends LocalizedTestCase {
   public Query getQueryDOA(String query, Analyzer a)
     throws Exception {
     if (a == null)
-      a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", a);
+      a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a);
     qp.setDefaultOperator(QueryParser.AND_OPERATOR);
     return qp.parse(query);
   }
@@ -253,8 +252,8 @@ public class TestQueryParser extends LocalizedTestCase {
 
   public void testSimple() throws Exception {
     assertQueryEquals("term term term", null, "term term term");
-    assertQueryEquals("türm term term", new WhitespaceAnalyzer(Version.LUCENE_CURRENT), "türm term term");
-    assertQueryEquals("ümlaut", new WhitespaceAnalyzer(Version.LUCENE_CURRENT), "ümlaut");
+    assertQueryEquals("türm term term", new WhitespaceAnalyzer(TEST_VERSION_CURRENT), "türm term term");
+    assertQueryEquals("ümlaut", new WhitespaceAnalyzer(TEST_VERSION_CURRENT), "ümlaut");
 
     assertQueryEquals("\"\"", new KeywordAnalyzer(), "");
     assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:");
@@ -301,7 +300,7 @@ public class TestQueryParser extends LocalizedTestCase {
     assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
                       "+(title:dog title:cat) -author:\"bob dole\"");
 
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new StandardAnalyzer(TEST_VERSION_CURRENT));
     // make sure OR is the default:
     assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator());
     qp.setDefaultOperator(QueryParser.AND_OPERATOR);
@@ -311,7 +310,7 @@ public class TestQueryParser extends LocalizedTestCase {
   }
 
   public void testPunct() throws Exception {
-    Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
     assertQueryEquals("a&b", a, "a&b");
     assertQueryEquals("a&&b", a, "a&&b");
     assertQueryEquals(".NET", a, ".NET");
@@ -331,7 +330,7 @@ public class TestQueryParser extends LocalizedTestCase {
     assertQueryEquals("term 1.0 1 2", null, "term");
     assertQueryEquals("term term1 term2", null, "term term term");
 
-    Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
     assertQueryEquals("3", a, "3");
     assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
     assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -456,7 +455,7 @@ public class TestQueryParser extends LocalizedTestCase {
     assertQueryEquals("[ a TO z]", null, "[a TO z]");
     assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
 
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer(Version.LUCENE_CURRENT));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new SimpleAnalyzer(TEST_VERSION_CURRENT));
     qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
     assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod());
 
@@ -473,7 +472,7 @@ public class TestQueryParser extends LocalizedTestCase {
   public void testFarsiRangeCollating() throws Exception {
 
     RAMDirectory ramDir = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+    IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
                                      IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     doc.add(new Field("content","\u0633\u0627\u0628",
@@ -482,7 +481,7 @@ public class TestQueryParser extends LocalizedTestCase {
     iw.close();
     IndexSearcher is = new IndexSearcher(ramDir, true);
 
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "content", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "content", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
 
     // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
     // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
@@ -580,7 +579,7 @@ public class TestQueryParser extends LocalizedTestCase {
     final String defaultField = "default";
     final String monthField = "month";
     final String hourField = "hour";
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer(Version.LUCENE_CURRENT));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new SimpleAnalyzer(TEST_VERSION_CURRENT));
 
     // Don't set any date resolution and verify if DateField is used
     assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
@@ -621,7 +620,7 @@ public class TestQueryParser extends LocalizedTestCase {
   }
 
   public void testEscaped() throws Exception {
-    Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
 
     /*assertQueryEquals("\\[brackets", a, "\\[brackets");
     assertQueryEquals("\\[brackets", null, "brackets");
@@ -715,7 +714,7 @@ public class TestQueryParser extends LocalizedTestCase {
   }
 
   public void testQueryStringEscaping() throws Exception {
-    Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
 
     assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c");
     assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c");
@@ -802,8 +801,8 @@ public class TestQueryParser extends LocalizedTestCase {
     throws Exception {
     Set<Object> stopWords = new HashSet<Object>(1);
     stopWords.add("on");
-    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, stopWords);
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", oneStopAnalyzer);
+    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, stopWords);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", oneStopAnalyzer);
     Query q = qp.parse("on^1.0");
     assertNotNull(q);
     q = qp.parse("\"hello\"^2.0");
@@ -815,7 +814,7 @@ public class TestQueryParser extends LocalizedTestCase {
     q = qp.parse("\"on\"^1.0");
     assertNotNull(q);
 
-    QueryParser qp2 = new QueryParser(Version.LUCENE_CURRENT, "field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    QueryParser qp2 = new QueryParser(TEST_VERSION_CURRENT, "field", new StandardAnalyzer(TEST_VERSION_CURRENT));
     q = qp2.parse("the^3");
     // "the" is a stop word so the result is an empty query:
     assertNotNull(q);
@@ -844,7 +843,7 @@ public class TestQueryParser extends LocalizedTestCase {
 
   public void testCustomQueryParserWildcard() {
     try {
-      new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("a?t");
+      new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("a?t");
       fail("Wildcard queries should not be allowed");
     } catch (ParseException expected) {
      // expected exception
@@ -853,7 +852,7 @@ public class TestQueryParser extends LocalizedTestCase {
 
   public void testCustomQueryParserFuzzy() throws Exception {
     try {
-      new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("xunit~");
+      new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("xunit~");
       fail("Fuzzy queries should not be allowed");
     } catch (ParseException expected) {
      // expected exception
@@ -863,7 +862,7 @@ public class TestQueryParser extends LocalizedTestCase {
   public void testBooleanQuery() throws Exception {
     BooleanQuery.setMaxClauseCount(2);
     try {
-      QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+      QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
       qp.parse("one two three");
       fail("ParseException expected due to too many boolean clauses");
     } catch (ParseException expected) {
@@ -875,7 +874,7 @@ public class TestQueryParser extends LocalizedTestCase {
    * This test differs from TestPrecedenceQueryParser
    */
   public void testPrecedence() throws Exception {
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
     Query query1 = qp.parse("A AND B OR C AND D");
     Query query2 = qp.parse("+A +B +C +D");
     assertEquals(query1, query2);
@@ -883,7 +882,7 @@ public class TestQueryParser extends LocalizedTestCase {
 
   public void testLocalDateFormat() throws IOException, ParseException {
     RAMDirectory ramDir = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
     addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
     iw.close();
@@ -899,7 +898,7 @@ public class TestQueryParser extends LocalizedTestCase {
 
   public void testStarParsing() throws Exception {
     final int[] type = new int[1];
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)) {
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)) {
       @Override
       protected Query getWildcardQuery(String field, String termStr) throws ParseException {
         // override error checking of superclass
@@ -958,7 +957,7 @@ public class TestQueryParser extends LocalizedTestCase {
   }
 
   public void testStopwords() throws Exception {
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "foo")));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "foo")));
     Query result = qp.parse("a:the OR a:foo");
     assertNotNull("result is null and it shouldn't be", result);
     assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
@@ -974,7 +973,7 @@ public class TestQueryParser extends LocalizedTestCase {
   }
 
   public void testPositionIncrement() throws Exception {
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "in", "are", "this")));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "in", "are", "this")));
     qp.setEnablePositionIncrements(true);
     String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
     // 0 2 5 7 8
@@ -991,7 +990,7 @@ public class TestQueryParser extends LocalizedTestCase {
   }
 
   public void testMatchAllDocs() throws Exception {
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
     assertEquals(new MatchAllDocsQuery(), qp.parse("*:*"));
     assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)"));
     BooleanQuery bq = (BooleanQuery)qp.parse("+*:* -*:*");
@@ -1000,7 +999,7 @@ public class TestQueryParser extends LocalizedTestCase {
   }
 
   private void assertHits(int expected, String query, IndexSearcher is) throws ParseException, IOException {
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "date", new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "date", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
     qp.setLocale(Locale.ENGLISH);
     Query q = qp.parse(query);
     ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
@@ -1028,7 +1027,7 @@ public class TestQueryParser extends LocalizedTestCase {
   // "match"
   public void testPositionIncrements() throws Exception {
     Directory dir = new MockRAMDirectory();
-    Analyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT);
+    Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
     IndexWriter w = new IndexWriter(dir, a, IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
@@ -1036,7 +1035,7 @@ public class TestQueryParser extends LocalizedTestCase {
     IndexReader r = w.getReader();
     w.close();
     IndexSearcher s = new IndexSearcher(r);
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "f", a);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "f", a);
     Query q = qp.parse("\"wizard of ozzy\"");
     assertEquals(1, s.search(q, 1).totalHits);
     r.close();
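TestQueryParser extends LocalizedTestCase rather than LuceneTestCase directly, yet it still uses TEST_VERSION_CURRENT unqualified. That works if LocalizedTestCase itself sits inside the LuceneTestCase hierarchy, which is the assumption this sketch encodes; the class names ending in Sketch are hypothetical:

    // Hedged sketch of the assumed hierarchy; not the actual Lucene source.
    abstract class LocalizedTestCaseSketch extends LuceneTestCaseSketch {}

    class QueryParserTestSketch extends LocalizedTestCaseSketch {
      org.apache.lucene.queryParser.QueryParser newParser(org.apache.lucene.analysis.Analyzer a) {
        // The inherited static constant resolves without any import.
        return new org.apache.lucene.queryParser.QueryParser(TEST_VERSION_CURRENT, "field", a);
      }
    }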
@@ -20,8 +20,6 @@ package org.apache.lucene.search;
 import java.util.Random;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -98,7 +96,7 @@ public class BaseTestRangeFilter extends LuceneTestCase {
     try {
 
       /* build an index */
      IndexWriter writer = new IndexWriter(index.index, new SimpleAnalyzer(Version.LUCENE_CURRENT), T,
      IndexWriter writer = new IndexWriter(index.index, new SimpleAnalyzer(TEST_VERSION_CURRENT), T,
                                           IndexWriter.MaxFieldLength.LIMITED);
 
      for (int d = minId; d <= maxId; d++) {
@@ -15,7 +15,7 @@ import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.MultiReader;
 import org.apache.lucene.index.IndexWriter.MaxFieldLength;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;
 
 /**
  * Copyright 2005 Apache Software Foundation
@@ -200,7 +200,7 @@ public class QueryUtils {
   private static RAMDirectory makeEmptyIndex(final int numDeletedDocs)
     throws IOException {
     RAMDirectory d = new RAMDirectory();
-    IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+    IndexWriter w = new IndexWriter(d, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
                                     MaxFieldLength.LIMITED);
     for (int i = 0; i < numDeletedDocs; i++) {
       w.addDocument(new Document());
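QueryUtils above is a plain helper class, not a test case, so it cannot inherit the constant; its first hunk therefore swaps the Version import for a static import of the field on LuceneTestCaseJ4. The same technique works for any utility outside the test hierarchy, roughly as follows (the QueryUtilsSketch name is hypothetical; the static-import target is the one shown in the hunk):

    import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT;

    import org.apache.lucene.analysis.WhitespaceAnalyzer;

    // Hedged sketch: helpers get the shared constant via static import
    // instead of inheritance.
    class QueryUtilsSketch {
      static WhitespaceAnalyzer newAnalyzer() {
        return new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
      }
    }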
@@ -32,7 +32,6 @@ import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 
 /** Test BooleanQuery2 against BooleanQuery by overriding the standard query parser.
  * This also tests the scoring order of BooleanQuery.
@@ -51,7 +50,7 @@ public class TestBoolean2 extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
     RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     for (int i = 0; i < docFields.length; i++) {
       Document doc = new Document();
       doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
@@ -68,14 +67,14 @@ public class TestBoolean2 extends LuceneTestCase {
     int docCount = 0;
     do {
       final Directory copy = new RAMDirectory(dir2);
-      IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+      IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
       w.addIndexesNoOptimize(new Directory[] {copy});
       docCount = w.maxDoc();
       w.close();
       mulFactor *= 2;
     } while(docCount < 3000);
 
-    IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter w = new IndexWriter(dir2, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
     for(int i=0;i<NUM_EXTRA_DOCS/2;i++) {
@@ -107,7 +106,7 @@ public class TestBoolean2 extends LuceneTestCase {
   };
 
   public Query makeQuery(String queryText) throws ParseException {
-    Query q = (new QueryParser(Version.LUCENE_CURRENT, field, new WhitespaceAnalyzer(Version.LUCENE_CURRENT))).parse(queryText);
+    Query q = (new QueryParser(TEST_VERSION_CURRENT, field, new WhitespaceAnalyzer(TEST_VERSION_CURRENT))).parse(queryText);
     return q;
   }
 
@@ -20,7 +20,6 @@ package org.apache.lucene.search;
 
 import junit.framework.TestCase;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -60,7 +59,7 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
 
     index = new RAMDirectory();
     IndexWriter writer = new IndexWriter(index,
-                                         new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+                                         new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
                                          true, IndexWriter.MaxFieldLength.LIMITED);
 
     for (int i = 0; i < data.length; i++) {
@@ -135,7 +135,7 @@ public class TestBooleanOr extends LuceneTestCase {
     RAMDirectory rd = new RAMDirectory();
 
     //
-    IndexWriter writer = new IndexWriter(rd, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(rd, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     //
     Document d = new Document();
@@ -18,8 +18,6 @@ package org.apache.lucene.search;
  */
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
 import junit.framework.Test;
 import junit.framework.TestSuite;
 import junit.textui.TestRunner;
@@ -81,7 +79,7 @@ public class TestBooleanPrefixQuery extends LuceneTestCase {
     Query rw2 = null;
     IndexReader reader = null;
     try {
-      IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
       for (int i = 0; i < categories.length; i++) {
         Document doc = new Document();
         doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
@@ -24,7 +24,6 @@ import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.index.Term;
 
 public class TestBooleanQuery extends LuceneTestCase {
@@ -61,7 +60,7 @@ public class TestBooleanQuery extends LuceneTestCase {
   // LUCENE-1630
   public void testNullOrSubScorer() throws Throwable {
     Directory dir = new MockRAMDirectory();
-    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("field", "a b c d", Field.Store.NO, Field.Index.ANALYZED));
     w.addDocument(doc);
@@ -28,7 +28,6 @@ import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 
 public class TestBooleanScorer extends LuceneTestCase
 {
@@ -45,7 +44,7 @@ public class TestBooleanScorer extends LuceneTestCase
     String[] values = new String[] { "1", "2", "3", "4" };
 
     try {
-      IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
       for (int i = 0; i < values.length; i++) {
         Document doc = new Document();
         doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
@@ -32,7 +32,7 @@ import org.apache.lucene.util.OpenBitSetDISI;
 public class TestCachingWrapperFilter extends LuceneTestCase {
   public void testCachingWorks() throws Exception {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.close();
 
     IndexReader reader = IndexReader.open(dir, true);
@@ -71,7 +71,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
 
   public void testIsCacheAble() throws Exception {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.close();
 
     IndexReader reader = IndexReader.open(dir, true);
@@ -70,7 +70,7 @@ implements Serializable {
   private Directory getIndex()
     throws IOException {
     RAMDirectory indexStore = new RAMDirectory ();
-    IndexWriter writer = new IndexWriter (indexStore, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter (indexStore, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     RandomGen random = new RandomGen(newRandom());
     for (int i=0; i<INDEX_SIZE; ++i) { // don't decrease; if to low the problem doesn't show up
       Document doc = new Document();
@@ -18,7 +18,6 @@ package org.apache.lucene.search;
  */
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.DateTools;
 import org.apache.lucene.document.Document;
@@ -51,7 +50,7 @@ public class TestDateFilter
   {
     // create an index
     RAMDirectory indexStore = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     long now = System.currentTimeMillis();
 
@@ -112,7 +111,7 @@ public class TestDateFilter
   {
     // create an index
     RAMDirectory indexStore = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     long now = System.currentTimeMillis();
 
@@ -33,7 +33,6 @@ import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
 
 /**
  * Test date sorting, i.e. auto-sorting of fields with type "long".
@@ -51,7 +50,7 @@ public class TestDateSort extends LuceneTestCase {
     super.setUp();
     // Create an index writer.
     directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
                                          IndexWriter.MaxFieldLength.LIMITED);
 
     // oldest doc:
@@ -76,7 +75,7 @@ public class TestDateSort extends LuceneTestCase {
 
     Sort sort = new Sort(new SortField(DATE_TIME_FIELD, SortField.STRING, true));
 
-    QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, TEXT_FIELD, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+    QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, TEXT_FIELD, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
     Query query = queryParser.parse("Document");
 
     // Execute the search and process the search results.
@@ -19,7 +19,6 @@ package org.apache.lucene.search;
  */
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -80,7 +79,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase{
 
     index = new RAMDirectory();
     IndexWriter writer = new IndexWriter(index,
-        new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+        new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setSimilarity(sim);
 

@@ -20,7 +20,6 @@ package org.apache.lucene.search;
 import java.io.IOException;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.*;
 import org.apache.lucene.index.IndexReader;
@@ -40,7 +39,7 @@ public class TestDocBoost extends LuceneTestCase {
 
   public void testDocBoost() throws Exception {
     RAMDirectory store = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     Fieldable f1 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
     Fieldable f2 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);

@@ -35,7 +35,6 @@ import org.apache.lucene.index.IndexWriter.MaxFieldLength;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
 
 public class TestDocIdSet extends LuceneTestCase {
@@ -106,7 +105,7 @@ public class TestDocIdSet extends LuceneTestCase {
     // Tests that if a Filter produces a null DocIdSet, which is given to
     // IndexSearcher, everything works fine. This came up in LUCENE-1754.
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("c", "val", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
     writer.addDocument(doc);

@@ -23,8 +23,6 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.index.*;
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -36,7 +34,7 @@ public class TestElevationComparator extends LuceneTestCase {
   //@Test
   public void testSorting() throws Throwable {
     Directory directory = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
     writer.setMergeFactor(1000);
     writer.addDocument(adoc(new String[] {"id", "a", "title", "ipod", "str_s", "a"}));

@@ -32,7 +32,6 @@ import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 
 /**
  * Tests primitive queries (ie: that rewrite to themselves) to
@@ -52,7 +51,7 @@ public class TestExplanations extends LuceneTestCase {
   public static final String KEY = "KEY";
   public static final String FIELD = "field";
   public static final QueryParser qp =
-    new QueryParser(Version.LUCENE_CURRENT, FIELD, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+    new QueryParser(TEST_VERSION_CURRENT, FIELD, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
 
   @Override
   public void tearDown() throws Exception {
@@ -64,7 +63,7 @@ public class TestExplanations extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
     RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+    IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
         IndexWriter.MaxFieldLength.LIMITED);
     for (int i = 0; i < docFields.length; i++) {
       Document doc = new Document();

@@ -23,8 +23,6 @@ import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
 import java.io.IOException;
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
@@ -41,7 +39,7 @@ public class TestFieldCache extends LuceneTestCase {
   protected void setUp() throws Exception {
     super.setUp();
     RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     long theLong = Long.MAX_VALUE;
     double theDouble = Double.MAX_VALUE;
     byte theByte = Byte.MAX_VALUE;

@@ -27,7 +27,6 @@ import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
 
 /**
  * A basic 'positive' Unit test class for the FieldCacheRangeFilter class.
@@ -532,7 +531,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
   // test using a sparse index (with deleted docs). The DocIdSet should be not cacheable, as it uses TermDocs if the range contains 0
   public void testSparseIndex() throws IOException {
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(Version.LUCENE_CURRENT), T, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), T, IndexWriter.MaxFieldLength.LIMITED);
 
     for (int d = -20; d <= 20; d++) {
       Document doc = new Document();

@@ -27,8 +27,6 @@ import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.DocIdBitSet;
-import org.apache.lucene.util.Version;
-
 import java.util.BitSet;
 
 /**
@@ -50,7 +48,7 @@ public class TestFilteredQuery extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
     directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter (directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter (directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     Document doc = new Document();
     doc.add (new Field("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));

@@ -20,8 +20,6 @@ package org.apache.lucene.search;
 import java.io.IOException;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
 import org.apache.lucene.analysis.WhitespaceAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -51,13 +49,13 @@ public class TestFilteredSearch extends LuceneTestCase {
     RAMDirectory directory = new RAMDirectory();
     int[] filterBits = {1, 36};
     SimpleDocIdSetFilter filter = new SimpleDocIdSetFilter(filterBits);
-    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     searchFiltered(writer, directory, filter, enforceSingleSegment);
     // run the test on more than one segment
     enforceSingleSegment = false;
     // reset - it is stateful
     filter.reset();
-    writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     // we index 60 docs - this will create 6 segments
     writer.setMaxBufferedDocs(10);
     searchFiltered(writer, directory, filter, enforceSingleSegment);

@@ -33,7 +33,6 @@ import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.util.Version;
 
 /**
  * Tests {@link FuzzyQuery}.
@@ -43,7 +42,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
 
   public void testFuzziness() throws Exception {
     RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     addDoc("aaaaa", writer);
     addDoc("aaaab", writer);
     addDoc("aaabb", writer);
@@ -200,7 +199,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
 
   public void testFuzzinessLong() throws Exception {
     RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     addDoc("aaaaaaa", writer);
     addDoc("segment", writer);
     writer.optimize();
@@ -288,7 +287,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
 
   public void testTokenLengthOpt() throws IOException {
     RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
         true, IndexWriter.MaxFieldLength.LIMITED);
     addDoc("12345678911", writer);
     addDoc("segment", writer);
@@ -320,7 +319,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
 
   public void testGiga() throws Exception {
 
-    StandardAnalyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    StandardAnalyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
 
     Directory index = new MockRAMDirectory();
     IndexWriter w = new IndexWriter(index, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
@@ -345,7 +344,7 @@ public class TestFuzzyQuery extends LuceneTestCase {
     IndexReader r = w.getReader();
     w.close();
 
-    Query q = new QueryParser(Version.LUCENE_CURRENT, "field", analyzer).parse( "giga~0.9" );
+    Query q = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer).parse( "giga~0.9" );
 
     // 3. search
     IndexSearcher searcher = new IndexSearcher(r);

@@ -29,14 +29,13 @@ import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.store.RAMDirectory;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 
 /**
  * Tests MatchAllDocsQuery.
  *
  */
 public class TestMatchAllDocsQuery extends LuceneTestCase {
-  private Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
+  private Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
 
   public void testQuery() throws Exception {
 
@@ -100,7 +99,7 @@ public class TestMatchAllDocsQuery extends LuceneTestCase {
     assertEquals(2, hits.length);
 
     // test parsable toString()
-    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "key", analyzer);
+    QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "key", analyzer);
     hits = is.search(qp.parse(new MatchAllDocsQuery().toString()), null, 1000).scoreDocs;
     assertEquals(2, hits.length);
 

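One detail in the TestMatchAllDocsQuery hunk above is worth calling out: the
constant is referenced from an instance-field initializer, not just from inside
test methods, which only compiles because TEST_VERSION_CURRENT is a static
member inherited from the base class. A minimal sketch of that shape (the class
below is hypothetical; only the field idiom is taken from the hunk):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.util.LuceneTestCase;

    public class TestFieldInitializerIdiom extends LuceneTestCase { // hypothetical
      // Resolved at construction time via the inherited static constant.
      private Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
    }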
@@ -28,8 +28,6 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
 import java.io.IOException;
 import java.util.LinkedList;
 import java.util.Collections;
@@ -47,7 +45,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase
 
   public void testPhrasePrefix() throws IOException {
     RAMDirectory indexStore = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     add("blueberry pie", writer);
     add("blueberry strudel", writer);
     add("blueberry pizza", writer);
@@ -141,7 +139,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase
     // The contained PhraseMultiQuery must contain exactly one term array.
 
     RAMDirectory indexStore = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     add("blueberry pie", writer);
     add("blueberry chewing gum", writer);
     add("blue raspberry pie", writer);
@@ -169,7 +167,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase
 
   public void testPhrasePrefixWithBooleanQuery() throws IOException {
     RAMDirectory indexStore = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(indexStore, new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.LIMITED);
     add("This is a test", "object", writer);
     add("a note", "note", writer);
     writer.close();

@@ -30,8 +30,6 @@ import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.util.Version;
-
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashSet;
@@ -84,9 +82,9 @@ public class TestMultiSearcher extends LuceneTestCase
     lDoc3.add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
 
     // creating an index writer for the first index
-    IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     // creating an index writer for the second index, but writing nothing
-    IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     //--------------------------------------------------------------------
     // scenario 1
@@ -103,7 +101,7 @@ public class TestMultiSearcher extends LuceneTestCase
     writerB.close();
 
     // creating the query
-    QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fulltext", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "fulltext", new StandardAnalyzer(TEST_VERSION_CURRENT));
     Query query = parser.parse("handle:1");
 
     // building the searchables
@@ -130,7 +128,7 @@ public class TestMultiSearcher extends LuceneTestCase
     //--------------------------------------------------------------------
 
     // adding one document to the empty index
-    writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+    writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     writerB.addDocument(lDoc);
     writerB.optimize();
     writerB.close();
@@ -176,7 +174,7 @@ public class TestMultiSearcher extends LuceneTestCase
     readerB.close();
 
     // optimizing the index with the writer
-    writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+    writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     writerB.optimize();
     writerB.close();
 

@@ -26,8 +26,6 @@ import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
-
 import java.io.IOException;
 
 /**
@@ -88,7 +86,7 @@ public class TestMultiSearcherRanking extends LuceneTestCase {
   private void checkQuery(String queryStr) throws IOException, ParseException {
     // check result hit ranking
     if(verbose) System.out.println("Query: " + queryStr);
-    QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+    QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, new StandardAnalyzer(TEST_VERSION_CURRENT));
     Query query = queryParser.parse(queryStr);
     ScoreDoc[] multiSearcherHits = multiSearcher.search(query, null, 1000).scoreDocs;
     ScoreDoc[] singleSearcherHits = singleSearcher.search(query, null, 1000).scoreDocs;
@@ -115,12 +113,12 @@ public class TestMultiSearcherRanking extends LuceneTestCase {
     super.setUp();
     // create MultiSearcher from two seperate searchers
     Directory d1 = new RAMDirectory();
-    IndexWriter iw1 = new IndexWriter(d1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true,
+    IndexWriter iw1 = new IndexWriter(d1, new StandardAnalyzer(TEST_VERSION_CURRENT), true,
         IndexWriter.MaxFieldLength.LIMITED);
     addCollection1(iw1);
     iw1.close();
     Directory d2 = new RAMDirectory();
-    IndexWriter iw2 = new IndexWriter(d2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true,
+    IndexWriter iw2 = new IndexWriter(d2, new StandardAnalyzer(TEST_VERSION_CURRENT), true,
         IndexWriter.MaxFieldLength.LIMITED);
     addCollection2(iw2);
     iw2.close();
@@ -132,7 +130,7 @@ public class TestMultiSearcherRanking extends LuceneTestCase {
 
     // create IndexSearcher which contains all documents
     Directory d = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true,
+    IndexWriter iw = new IndexWriter(d, new StandardAnalyzer(TEST_VERSION_CURRENT), true,
         IndexWriter.MaxFieldLength.LIMITED);
     addCollection1(iw);
     addCollection2(iw);

@@ -26,8 +26,6 @@ import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
-
 import java.io.IOException;
 import java.text.Collator;
 import java.util.Locale;
@@ -66,7 +64,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
         "X 4 5 6" };
 
     small = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(small, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+    IndexWriter writer = new IndexWriter(small, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
         IndexWriter.MaxFieldLength.LIMITED);
 
     for (int i = 0; i < data.length; i++) {
@@ -617,7 +615,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
 
     /* build an index */
     RAMDirectory farsiIndex = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(farsiIndex, new SimpleAnalyzer(Version.LUCENE_CURRENT), T,
+    IndexWriter writer = new IndexWriter(farsiIndex, new SimpleAnalyzer(TEST_VERSION_CURRENT), T,
         IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
@@ -657,7 +655,7 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter {
 
     /* build an index */
     RAMDirectory danishIndex = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(danishIndex, new SimpleAnalyzer(Version.LUCENE_CURRENT), T,
+    IndexWriter writer = new IndexWriter(danishIndex, new SimpleAnalyzer(TEST_VERSION_CURRENT), T,
         IndexWriter.MaxFieldLength.LIMITED);
 
     // Danish collation orders the words below in the given order

@@ -18,7 +18,6 @@ package org.apache.lucene.search;
  */
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 import org.apache.lucene.analysis.SimpleAnalyzer;
 import org.apache.lucene.document.*;
 import org.apache.lucene.index.IndexReader;
@@ -42,7 +41,7 @@ public class TestMultiThreadTermVectors extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
     IndexWriter writer
-      = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+      = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     //writer.setUseCompoundFile(false);
     //writer.infoStream = System.out;
     for (int i = 0; i < numDocs; i++) {

@@ -30,7 +30,6 @@ import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriter.MaxFieldLength;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
 
 public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
 
@@ -44,7 +43,7 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
     final Random rnd = newRandom();
 
     RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED);
 
     DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US));
 

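Taken together, the whole migration reduces to one idiom. A self-contained
sketch of a migrated test under the assumptions above (the test class and its
contents are hypothetical; the constructor signatures are the ones exercised
throughout this diff):

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.LuceneTestCase;

    public class TestSharedVersionIdiom extends LuceneTestCase { // hypothetical
      public void testWriterUsesSharedVersion() throws Exception {
        RAMDirectory dir = new RAMDirectory();
        // No org.apache.lucene.util.Version import is needed here:
        // TEST_VERSION_CURRENT is inherited from LuceneTestCase.
        IndexWriter writer = new IndexWriter(dir,
            new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
            IndexWriter.MaxFieldLength.LIMITED);
        Document doc = new Document();
        doc.add(new Field("fieldname", "some stored text",
            Field.Store.YES, Field.Index.ANALYZED));
        writer.addDocument(doc);
        writer.close();
      }
    }

Retargeting the entire test suite at a different release then becomes a
one-line change to the constant in LuceneTestCase (and its JUnit4 counterpart
named in CHANGES.txt), rather than an edit in every file touched here.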