LUCENE-3378: some contribs depend on core tests compiled

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1158697 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Robert Muir 2011-08-17 13:44:23 +00:00
parent dfd24c6424
commit f06edac7c5
10 changed files with 43 additions and 45 deletions

View File

@ -31,9 +31,6 @@ New Features
* LUCENE-2507: Added DirectSpellChecker, which retrieves correction candidates directly
from the term dictionary using levenshtein automata. (Robert Muir)
* LUCENE-2836: Add FieldCacheRewriteMethod, which rewrites MultiTermQueries
using the FieldCache's TermsEnum. (Robert Muir)
API Changes

View File

@ -41,7 +41,6 @@
<path id="test.base.classpath">
<path refid="classpath"/>
<pathelement location="${common.dir}/build/classes/test-framework"/>
<pathelement location="${common.dir}/build/classes/test"/>
<path refid="junit-path"/>
<pathelement location="${build.dir}/classes/java"/>
</path>
@ -284,4 +283,4 @@
</ant>
<property name="suggest.uptodate" value="true"/>
</target>
</project>
</project>

View File

@ -45,17 +45,17 @@ public class TestIndexSplitter extends LuceneTestCase {
setMergePolicy(mergePolicy)
);
for (int x=0; x < 100; x++) {
Document doc = TestIndexWriterReader.createDocument(x, "index", 5);
Document doc = DocHelper.createDocument(x, "index", 5);
iw.addDocument(doc);
}
iw.commit();
for (int x=100; x < 150; x++) {
Document doc = TestIndexWriterReader.createDocument(x, "index2", 5);
Document doc = DocHelper.createDocument(x, "index2", 5);
iw.addDocument(doc);
}
iw.commit();
for (int x=150; x < 200; x++) {
Document doc = TestIndexWriterReader.createDocument(x, "index3", 5);
Document doc = DocHelper.createDocument(x, "index3", 5);
iw.addDocument(doc);
}
iw.commit();

View File

@ -29,6 +29,9 @@ import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.store.Directory;
@ -251,4 +254,21 @@ class DocHelper {
public static int numFields(Document doc) {
return doc.getFields().size();
}
/**
 * Builds a synthetic test document identified by {@code n}.
 * <p>
 * Fields produced:
 * <ul>
 *   <li>{@code id} — {@code n} as a string, not analyzed</li>
 *   <li>{@code indexname} — the given index name, not analyzed</li>
 *   <li>{@code field1} — analyzed text {@code "a" + n}</li>
 *   <li>{@code field2}..{@code field<numFields>} — analyzed text
 *       {@code "a" + n + " b" + n} (numFields - 1 extra fields)</li>
 * </ul>
 * All fields are stored and carry term vectors with positions and offsets.
 *
 * @param n          document number, used in the id and field text
 * @param indexName  value stored in the {@code indexname} field
 * @param numFields  total field count including {@code field1}; values
 *                   &lt;= 1 yield only id/indexname/field1
 * @return the populated document (never null)
 */
public static Document createDocument(int n, String indexName, int numFields) {
StringBuilder sb = new StringBuilder();
Document doc = new Document();
doc.add(new Field("id", Integer.toString(n), Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("indexname", indexName, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
// Text is accumulated in sb: "a<n>" for field1, then " b<n>" appended
// so the remaining fields share the longer string.
sb.append("a");
sb.append(n);
doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
sb.append(" b");
sb.append(n);
// Starts at i = 1 because field1 was already added above; the loop
// therefore contributes numFields - 1 additional fields.
for (int i = 1; i < numFields; i++) {
doc.add(new Field("field" + (i + 1), sb.toString(), Store.YES,
Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
}
return doc;
}
}

View File

@ -74,7 +74,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
for (int i = 0; i < 97 ; i++) {
IndexReader reader = writer.getReader();
if (i == 0) {
writer.addDocument(createDocument(i, "x", 1 + random.nextInt(5)));
writer.addDocument(DocHelper.createDocument(i, "x", 1 + random.nextInt(5)));
} else {
int previous = random.nextInt(i);
// a check if the reader is current here could fail since there might be
@ -83,10 +83,10 @@ public class TestIndexWriterReader extends LuceneTestCase {
case 0:
case 1:
case 2:
writer.addDocument(createDocument(i, "x", 1 + random.nextInt(5)));
writer.addDocument(DocHelper.createDocument(i, "x", 1 + random.nextInt(5)));
break;
case 3:
writer.updateDocument(new Term("id", "" + previous), createDocument(
writer.updateDocument(new Term("id", "" + previous), DocHelper.createDocument(
previous, "x", 1 + random.nextInt(5)));
break;
case 4:
@ -105,7 +105,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
writer = new IndexWriter(dir1, iwc);
assertTrue(reader.isCurrent());
writer.addDocument(createDocument(1, "x", 1+random.nextInt(5)));
writer.addDocument(DocHelper.createDocument(1, "x", 1+random.nextInt(5)));
assertTrue(reader.isCurrent()); // segments in ram but IW is different to the readers one
writer.close();
assertFalse(reader.isCurrent()); // segments written
@ -422,7 +422,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
addDir = newDirectory();
IndexWriter writer = new IndexWriter(addDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
for (int i = 0; i < NUM_INIT_DOCS; i++) {
Document doc = createDocument(i, "addindex", 4);
Document doc = DocHelper.createDocument(i, "addindex", 4);
writer.addDocument(doc);
}
@ -543,7 +543,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
assertEquals(r2.maxDoc(), 100);
// add 100 documents
for (int x = 10000; x < 10000 + 100; x++) {
Document d = createDocument(x, "index1", 5);
Document d = DocHelper.createDocument(x, "index1", 5);
writer.addDocument(d);
}
writer.flush(false, true);
@ -575,25 +575,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
dir1.close();
}
/**
 * Builds a synthetic test document identified by {@code n}: a stored,
 * not-analyzed {@code id} and {@code indexname} field plus
 * {@code numFields} analyzed text fields, all with term vectors
 * carrying positions and offsets.
 * <p>
 * NOTE(review): this commit moves an identical copy of this helper into
 * {@code DocHelper} (see the hunk against DocHelper in this same change)
 * so other tests no longer depend on compiled core tests.
 *
 * @param n          document number, used in the id and field text
 * @param indexName  value stored in the {@code indexname} field
 * @param numFields  total field count including {@code field1}
 * @return the populated document (never null)
 */
public static Document createDocument(int n, String indexName, int numFields) {
StringBuilder sb = new StringBuilder();
Document doc = new Document();
doc.add(new Field("id", Integer.toString(n), Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("indexname", indexName, Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
// "a<n>" is field1's text; " b<n>" is appended afterwards so every
// subsequent field gets the longer "a<n> b<n>" string.
sb.append("a");
sb.append(n);
doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
sb.append(" b");
sb.append(n);
// i starts at 1 because field1 already exists; names continue at field2.
for (int i = 1; i < numFields; i++) {
doc.add(new Field("field" + (i + 1), sb.toString(), Store.YES,
Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
}
return doc;
}
/*
* Delete a document by term and return the doc id
*
@ -609,7 +591,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setMergePolicy(new LogDocMergePolicy()));
for (int i = 0; i < 100; i++) {
w.addDocument(createDocument(i, indexName, 4));
w.addDocument(DocHelper.createDocument(i, indexName, 4));
if (multiSegment && (i % 10) == 0) {
}
}
@ -622,7 +604,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
public static void createIndexNoClose(boolean multiSegment, String indexName,
IndexWriter w) throws IOException {
for (int i = 0; i < 100; i++) {
w.addDocument(createDocument(i, indexName, 4));
w.addDocument(DocHelper.createDocument(i, indexName, 4));
}
if (!multiSegment) {
w.optimize();
@ -662,14 +644,14 @@ public class TestIndexWriterReader extends LuceneTestCase {
int num = atLeast(100);
for (int i = 0; i < num; i++) {
writer.addDocument(createDocument(i, "test", 4));
writer.addDocument(DocHelper.createDocument(i, "test", 4));
}
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
assertTrue(warmer.warmCount > 0);
final int count = warmer.warmCount;
writer.addDocument(createDocument(17, "test", 4));
writer.addDocument(DocHelper.createDocument(17, "test", 4));
writer.optimize();
assertTrue(warmer.warmCount > count);
@ -695,7 +677,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
assertEquals(100, r1.numDocs());
for (int i = 0; i < 10; i++) {
writer.addDocument(createDocument(i, "test", 4));
writer.addDocument(DocHelper.createDocument(i, "test", 4));
}
((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
@ -857,7 +839,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
do {
try {
for(int docUpto=0;docUpto<10;docUpto++) {
writer.addDocument(createDocument(10*count+docUpto, "test", 4));
writer.addDocument(DocHelper.createDocument(10*count+docUpto, "test", 4));
}
count++;
final int limit = count*10;

View File

@ -91,7 +91,7 @@ public class TestNRTReaderWithThreads extends LuceneTestCase {
//int n = random.nextInt(2);
if (type == 0) {
int i = seq.addAndGet(1);
Document doc = TestIndexWriterReader.createDocument(i, "index1", 10);
Document doc = DocHelper.createDocument(i, "index1", 10);
writer.addDocument(doc);
addCount++;
} else if (type == 1) {

View File

@ -46,14 +46,14 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
IndexWriter writer = new IndexWriter(dir, iwc);
writer.setInfoStream(VERBOSE ? System.out : null);
for (int x = 0; x < 5; x++) {
writer.addDocument(TestIndexWriterReader.createDocument(x, "1", 2));
writer.addDocument(DocHelper.createDocument(x, "1", 2));
//System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
}
//System.out.println("commit1");
writer.commit();
assertEquals(1, writer.segmentInfos.size());
for (int x = 5; x < 10; x++) {
writer.addDocument(TestIndexWriterReader.createDocument(x, "2", 2));
writer.addDocument(DocHelper.createDocument(x, "2", 2));
//System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
}
//System.out.println("commit2");
@ -61,7 +61,7 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
assertEquals(2, writer.segmentInfos.size());
for (int x = 10; x < 15; x++) {
writer.addDocument(TestIndexWriterReader.createDocument(x, "3", 2));
writer.addDocument(DocHelper.createDocument(x, "3", 2));
//System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
}
@ -174,12 +174,12 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
**/
void part2(IndexWriter writer, RangeMergePolicy fsmp) throws Exception {
for (int x = 20; x < 25; x++) {
writer.addDocument(TestIndexWriterReader.createDocument(x, "5", 2));
writer.addDocument(DocHelper.createDocument(x, "5", 2));
//System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
}
writer.flush(false, false);
for (int x = 25; x < 30; x++) {
writer.addDocument(TestIndexWriterReader.createDocument(x, "5", 2));
writer.addDocument(DocHelper.createDocument(x, "5", 2));
//System.out.println("numRamDocs(" + x + ")" + writer.numRamDocs());
}
writer.flush(false, false);